py3: manually import getattr where it is needed...
Gregory Szorc
r43359:c59eb156 default

The requested changes are too big and content was truncated.
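
This series makes each module import getattr explicitly from mercurial.pycompat instead of relying on it being provided implicitly; on Python 3, pycompat wraps the attribute built-ins so that attribute names passed as bytes still work. A minimal sketch of such a wrapper, assuming pycompat internals of this era (illustrative, not a copy of the real module):

    import builtins
    import functools


    def sysstr(s):
        # Mercurial keeps most strings as bytes; coerce to str when an
        # attribute name is needed.
        if isinstance(s, str):
            return s
        return s.decode('latin-1')


    def _wrapattrfunc(f):
        @functools.wraps(f)
        def w(object, name, *args):
            # Accept bytes or str names; extra positional args (such as a
            # getattr default) pass through unchanged.
            return f(object, sysstr(name), *args)

        return w


    getattr = _wrapattrfunc(builtins.getattr)
    setattr = _wrapattrfunc(builtins.setattr)

With such a wrapper in scope, `getattr(obj, b'filename', None)` works on Python 3, where the built-in would reject the bytes name.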

@@ -1,331 +1,334 @@
 # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
 #
 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 
 import errno
 import os
 import re
 import socket
 
 from mercurial.i18n import _
-from mercurial.pycompat import open
+from mercurial.pycompat import (
+    getattr,
+    open,
+)
 from mercurial import (
     encoding,
     error,
     util,
 )
 from mercurial.utils import (
     dateutil,
     procutil,
 )
 
 from . import (
     common,
     cvsps,
 )
 
 stringio = util.stringio
 checktool = common.checktool
 commit = common.commit
 converter_source = common.converter_source
 makedatetimestamp = common.makedatetimestamp
 NoRepo = common.NoRepo
 
 
 class convert_cvs(converter_source):
     def __init__(self, ui, repotype, path, revs=None):
         super(convert_cvs, self).__init__(ui, repotype, path, revs=revs)
 
         cvs = os.path.join(path, b"CVS")
         if not os.path.exists(cvs):
             raise NoRepo(_(b"%s does not look like a CVS checkout") % path)
 
         checktool(b'cvs')
 
         self.changeset = None
         self.files = {}
         self.tags = {}
         self.lastbranch = {}
         self.socket = None
         self.cvsroot = open(os.path.join(cvs, b"Root"), b'rb').read()[:-1]
         self.cvsrepo = open(os.path.join(cvs, b"Repository"), b'rb').read()[:-1]
         self.encoding = encoding.encoding
 
         self._connect()
 
     def _parse(self):
         if self.changeset is not None:
             return
         self.changeset = {}
 
         maxrev = 0
         if self.revs:
             if len(self.revs) > 1:
                 raise error.Abort(
                     _(
                         b'cvs source does not support specifying '
                         b'multiple revs'
                     )
                 )
             # TODO: handle tags
             try:
                 # patchset number?
                 maxrev = int(self.revs[0])
             except ValueError:
                 raise error.Abort(
                     _(b'revision %s is not a patchset number') % self.revs[0]
                 )
 
         d = encoding.getcwd()
         try:
             os.chdir(self.path)
 
             cache = b'update'
             if not self.ui.configbool(b'convert', b'cvsps.cache'):
                 cache = None
             db = cvsps.createlog(self.ui, cache=cache)
             db = cvsps.createchangeset(
                 self.ui,
                 db,
                 fuzz=int(self.ui.config(b'convert', b'cvsps.fuzz')),
                 mergeto=self.ui.config(b'convert', b'cvsps.mergeto'),
                 mergefrom=self.ui.config(b'convert', b'cvsps.mergefrom'),
             )
 
             for cs in db:
                 if maxrev and cs.id > maxrev:
                     break
                 id = b"%d" % cs.id
                 cs.author = self.recode(cs.author)
                 self.lastbranch[cs.branch] = id
                 cs.comment = self.recode(cs.comment)
                 if self.ui.configbool(b'convert', b'localtimezone'):
                     cs.date = makedatetimestamp(cs.date[0])
                 date = dateutil.datestr(cs.date, b'%Y-%m-%d %H:%M:%S %1%2')
                 self.tags.update(dict.fromkeys(cs.tags, id))
 
                 files = {}
                 for f in cs.entries:
                     files[f.file] = b"%s%s" % (
                         b'.'.join([(b"%d" % x) for x in f.revision]),
                         [b'', b'(DEAD)'][f.dead],
                     )
 
                 # add current commit to set
                 c = commit(
                     author=cs.author,
                     date=date,
                     parents=[(b"%d" % p.id) for p in cs.parents],
                     desc=cs.comment,
                     branch=cs.branch or b'',
                 )
                 self.changeset[id] = c
                 self.files[id] = files
 
             self.heads = self.lastbranch.values()
         finally:
             os.chdir(d)
 
     def _connect(self):
         root = self.cvsroot
         conntype = None
         user, host = None, None
         cmd = [b'cvs', b'server']
 
         self.ui.status(_(b"connecting to %s\n") % root)
 
         if root.startswith(b":pserver:"):
             root = root[9:]
             m = re.match(
                 r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)', root
             )
             if m:
                 conntype = b"pserver"
                 user, passw, serv, port, root = m.groups()
                 if not user:
                     user = b"anonymous"
                 if not port:
                     port = 2401
                 else:
                     port = int(port)
                 format0 = b":pserver:%s@%s:%s" % (user, serv, root)
                 format1 = b":pserver:%s@%s:%d%s" % (user, serv, port, root)
 
                 if not passw:
                     passw = b"A"
                     cvspass = os.path.expanduser(b"~/.cvspass")
                     try:
                         pf = open(cvspass, b'rb')
                         for line in pf.read().splitlines():
                             part1, part2 = line.split(b' ', 1)
                             # /1 :pserver:user@example.com:2401/cvsroot/foo
                             # Ah<Z
                             if part1 == b'/1':
                                 part1, part2 = part2.split(b' ', 1)
                                 format = format1
                             # :pserver:user@example.com:/cvsroot/foo Ah<Z
                             else:
                                 format = format0
                             if part1 == format:
                                 passw = part2
                                 break
                         pf.close()
                     except IOError as inst:
                         if inst.errno != errno.ENOENT:
                             if not getattr(inst, 'filename', None):
                                 inst.filename = cvspass
                             raise
 
                 sck = socket.socket()
                 sck.connect((serv, port))
                 sck.send(
                     b"\n".join(
                         [
                             b"BEGIN AUTH REQUEST",
                             root,
                             user,
                             passw,
                             b"END AUTH REQUEST",
                             b"",
                         ]
                     )
                 )
                 if sck.recv(128) != b"I LOVE YOU\n":
                     raise error.Abort(_(b"CVS pserver authentication failed"))
 
                 self.writep = self.readp = sck.makefile(b'r+')
 
         if not conntype and root.startswith(b":local:"):
             conntype = b"local"
             root = root[7:]
 
         if not conntype:
             # :ext:user@host/home/user/path/to/cvsroot
             if root.startswith(b":ext:"):
                 root = root[5:]
             m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
             # Do not take Windows path "c:\foo\bar" for a connection strings
             if os.path.isdir(root) or not m:
                 conntype = b"local"
             else:
                 conntype = b"rsh"
                 user, host, root = m.group(1), m.group(2), m.group(3)
 
         if conntype != b"pserver":
             if conntype == b"rsh":
                 rsh = encoding.environ.get(b"CVS_RSH") or b"ssh"
                 if user:
                     cmd = [rsh, b'-l', user, host] + cmd
                 else:
                     cmd = [rsh, host] + cmd
 
             # popen2 does not support argument lists under Windows
             cmd = [procutil.shellquote(arg) for arg in cmd]
             cmd = procutil.quotecommand(b' '.join(cmd))
             self.writep, self.readp = procutil.popen2(cmd)
 
         self.realroot = root
 
         self.writep.write(b"Root %s\n" % root)
         self.writep.write(
             b"Valid-responses ok error Valid-requests Mode"
             b" M Mbinary E Checked-in Created Updated"
             b" Merged Removed\n"
         )
         self.writep.write(b"valid-requests\n")
         self.writep.flush()
         r = self.readp.readline()
         if not r.startswith(b"Valid-requests"):
             raise error.Abort(
                 _(
                     b'unexpected response from CVS server '
                     b'(expected "Valid-requests", but got %r)'
                 )
                 % r
             )
         if b"UseUnchanged" in r:
             self.writep.write(b"UseUnchanged\n")
             self.writep.flush()
             self.readp.readline()
 
     def getheads(self):
         self._parse()
         return self.heads
 
     def getfile(self, name, rev):
         def chunkedread(fp, count):
             # file-objects returned by socket.makefile() do not handle
             # large read() requests very well.
             chunksize = 65536
             output = stringio()
             while count > 0:
                 data = fp.read(min(count, chunksize))
                 if not data:
                     raise error.Abort(
                         _(b"%d bytes missing from remote file") % count
                     )
                 count -= len(data)
                 output.write(data)
             return output.getvalue()
 
         self._parse()
         if rev.endswith(b"(DEAD)"):
             return None, None
 
         args = (b"-N -P -kk -r %s --" % rev).split()
         args.append(self.cvsrepo + b'/' + name)
         for x in args:
             self.writep.write(b"Argument %s\n" % x)
         self.writep.write(b"Directory .\n%s\nco\n" % self.realroot)
         self.writep.flush()
 
         data = b""
         mode = None
         while True:
             line = self.readp.readline()
             if line.startswith(b"Created ") or line.startswith(b"Updated "):
                 self.readp.readline()  # path
                 self.readp.readline()  # entries
                 mode = self.readp.readline()[:-1]
                 count = int(self.readp.readline()[:-1])
                 data = chunkedread(self.readp, count)
             elif line.startswith(b" "):
                 data += line[1:]
             elif line.startswith(b"M "):
                 pass
             elif line.startswith(b"Mbinary "):
                 count = int(self.readp.readline()[:-1])
                 data = chunkedread(self.readp, count)
             else:
                 if line == b"ok\n":
                     if mode is None:
                         raise error.Abort(_(b'malformed response from CVS'))
                     return (data, b"x" in mode and b"x" or b"")
                 elif line.startswith(b"E "):
                     self.ui.warn(_(b"cvs server: %s\n") % line[2:])
                 elif line.startswith(b"Remove"):
                     self.readp.readline()
                 else:
                     raise error.Abort(_(b"unknown CVS response: %s") % line)
 
     def getchanges(self, rev, full):
         if full:
             raise error.Abort(_(b"convert from cvs does not support --full"))
         self._parse()
         return sorted(self.files[rev].iteritems()), {}, set()
 
     def getcommit(self, rev):
         self._parse()
         return self.changeset[rev]
 
     def gettags(self):
         self._parse()
         return self.tags
 
     def getchangedfiles(self, rev, i):
         self._parse()
         return sorted(self.files[rev])
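
In the hunk above (evidently hgext/convert/cvs.py, judging by its header comment), the only change is the import block: `open` was already imported from pycompat, and `getattr` joins it because `_connect()` probes the caught IOError with `getattr(inst, 'filename', None)`. A self-contained illustration of that probe-with-default pattern; `_annotate_ioerror` is a hypothetical helper, not part of the diff:

    import errno


    def _annotate_ioerror(inst, path):
        # IOError/OSError may or may not carry .filename depending on how
        # it was raised; fill it in before re-raising, as cvs.py does.
        if inst.errno != errno.ENOENT:
            if not getattr(inst, 'filename', None):
                inst.filename = path
            raise inst


    try:
        raise IOError(errno.EACCES, 'permission denied')
    except IOError as inst:
        try:
            _annotate_ioerror(inst, '~/.cvspass')
        except IOError as annotated:
            print(annotated.filename)  # -> ~/.cvspass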
@@ -1,157 +1,158 @@
 # -*- coding: utf-8 -*-
 
 # Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
 # This is a stripped-down version of the original bzr-svn transport.py,
 # Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
 
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation; either version 2 of the License, or
 # (at your option) any later version.
 
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, see <http://www.gnu.org/licenses/>.
 from __future__ import absolute_import
 
 import svn.client
 import svn.core
 import svn.ra
 
 Pool = svn.core.Pool
 SubversionException = svn.core.SubversionException
 
+from mercurial.pycompat import getattr
 from mercurial import util
 
 # Some older versions of the Python bindings need to be
 # explicitly initialized. But what we want to do probably
 # won't work worth a darn against those libraries anyway!
 svn.ra.initialize()
 
 svn_config = None
 
 
 def _create_auth_baton(pool):
     """Create a Subversion authentication baton. """
     import svn.client
 
     # Give the client context baton a suite of authentication
     # providers.h
     providers = [
         svn.client.get_simple_provider(pool),
         svn.client.get_username_provider(pool),
         svn.client.get_ssl_client_cert_file_provider(pool),
         svn.client.get_ssl_client_cert_pw_file_provider(pool),
         svn.client.get_ssl_server_trust_file_provider(pool),
     ]
     # Platform-dependent authentication methods
     getprovider = getattr(
         svn.core, 'svn_auth_get_platform_specific_provider', None
     )
     if getprovider:
         # Available in svn >= 1.6
         for name in (b'gnome_keyring', b'keychain', b'kwallet', b'windows'):
             for type in (b'simple', b'ssl_client_cert_pw', b'ssl_server_trust'):
                 p = getprovider(name, type, pool)
                 if p:
                     providers.append(p)
     else:
         if util.safehasattr(svn.client, b'get_windows_simple_provider'):
             providers.append(svn.client.get_windows_simple_provider(pool))
 
     return svn.core.svn_auth_open(providers, pool)
 
 
 class NotBranchError(SubversionException):
     pass
 
 
 class SvnRaTransport(object):
     """
     Open an ra connection to a Subversion repository.
     """
 
     def __init__(self, url=b"", ra=None):
         self.pool = Pool()
         self.svn_url = url
         self.username = b''
         self.password = b''
 
         # Only Subversion 1.4 has reparent()
         if ra is None or not util.safehasattr(svn.ra, b'reparent'):
             self.client = svn.client.create_context(self.pool)
             ab = _create_auth_baton(self.pool)
             self.client.auth_baton = ab
             global svn_config
             if svn_config is None:
                 svn_config = svn.core.svn_config_get_config(None)
             self.client.config = svn_config
             try:
                 self.ra = svn.client.open_ra_session(
                     self.svn_url, self.client, self.pool
                 )
             except SubversionException as xxx_todo_changeme:
                 (inst, num) = xxx_todo_changeme.args
                 if num in (
                     svn.core.SVN_ERR_RA_ILLEGAL_URL,
                     svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
                     svn.core.SVN_ERR_BAD_URL,
                 ):
                     raise NotBranchError(url)
                 raise
         else:
             self.ra = ra
             svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
 
     class Reporter(object):
         def __init__(self, reporter_data):
             self._reporter, self._baton = reporter_data
 
         def set_path(self, path, revnum, start_empty, lock_token, pool=None):
             svn.ra.reporter2_invoke_set_path(
                 self._reporter,
                 self._baton,
                 path,
                 revnum,
                 start_empty,
                 lock_token,
                 pool,
             )
 
         def delete_path(self, path, pool=None):
             svn.ra.reporter2_invoke_delete_path(
                 self._reporter, self._baton, path, pool
             )
 
         def link_path(
             self, path, url, revision, start_empty, lock_token, pool=None
         ):
             svn.ra.reporter2_invoke_link_path(
                 self._reporter,
                 self._baton,
                 path,
                 url,
                 revision,
                 start_empty,
                 lock_token,
                 pool,
             )
 
         def finish_report(self, pool=None):
             svn.ra.reporter2_invoke_finish_report(
                 self._reporter, self._baton, pool
             )
 
         def abort_report(self, pool=None):
             svn.ra.reporter2_invoke_abort_report(
                 self._reporter, self._baton, pool
             )
 
     def do_update(self, revnum, path, *args, **kwargs):
         return self.Reporter(
             svn.ra.do_update(self.ra, revnum, path, *args, **kwargs)
         )
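
The hunk above (evidently hgext/convert/transport.py) only adds `from mercurial.pycompat import getattr`; the call that needs it is the module-level `getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider', None)`. The same look-up-then-branch idiom, sketched against the standard library so it runs anywhere; `dumps_bytes` is deliberately an attribute that does not exist:

    import json

    # Probe for an optional API with a None default instead of wrapping an
    # AttributeError in try/except, as transport.py does for svn.core.
    dumps_bytes = getattr(json, 'dumps_bytes', None)  # json has no such name
    if dumps_bytes:
        data = dumps_bytes({})
    else:
        data = json.dumps({}).encode('ascii')  # fallback path
    print(data)  # -> b'{}'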
@@ -1,855 +1,856
1 # Copyright 2016-present Facebook. All Rights Reserved.
1 # Copyright 2016-present Facebook. All Rights Reserved.
2 #
2 #
3 # context: context needed to annotate a file
3 # context: context needed to annotate a file
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import hashlib
12 import hashlib
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.pycompat import (
16 from mercurial.pycompat import (
17 getattr,
17 open,
18 open,
18 setattr,
19 setattr,
19 )
20 )
20 from mercurial import (
21 from mercurial import (
21 error,
22 error,
22 linelog as linelogmod,
23 linelog as linelogmod,
23 lock as lockmod,
24 lock as lockmod,
24 mdiff,
25 mdiff,
25 node,
26 node,
26 pycompat,
27 pycompat,
27 scmutil,
28 scmutil,
28 util,
29 util,
29 )
30 )
30 from mercurial.utils import stringutil
31 from mercurial.utils import stringutil
31
32
32 from . import (
33 from . import (
33 error as faerror,
34 error as faerror,
34 revmap as revmapmod,
35 revmap as revmapmod,
35 )
36 )
36
37
37 # given path, get filelog, cached
38 # given path, get filelog, cached
38 @util.lrucachefunc
39 @util.lrucachefunc
39 def _getflog(repo, path):
40 def _getflog(repo, path):
40 return repo.file(path)
41 return repo.file(path)
41
42
42
43
43 # extracted from mercurial.context.basefilectx.annotate
44 # extracted from mercurial.context.basefilectx.annotate
44 def _parents(f, follow=True):
45 def _parents(f, follow=True):
45 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
46 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
46 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
47 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
47 # from the topmost introrev (= srcrev) down to p.linkrev() if it
48 # from the topmost introrev (= srcrev) down to p.linkrev() if it
48 # isn't an ancestor of the srcrev.
49 # isn't an ancestor of the srcrev.
49 f._changeid
50 f._changeid
50 pl = f.parents()
51 pl = f.parents()
51
52
52 # Don't return renamed parents if we aren't following.
53 # Don't return renamed parents if we aren't following.
53 if not follow:
54 if not follow:
54 pl = [p for p in pl if p.path() == f.path()]
55 pl = [p for p in pl if p.path() == f.path()]
55
56
56 # renamed filectx won't have a filelog yet, so set it
57 # renamed filectx won't have a filelog yet, so set it
57 # from the cache to save time
58 # from the cache to save time
58 for p in pl:
59 for p in pl:
59 if not b'_filelog' in p.__dict__:
60 if not b'_filelog' in p.__dict__:
60 p._filelog = _getflog(f._repo, p.path())
61 p._filelog = _getflog(f._repo, p.path())
61
62
62 return pl
63 return pl
63
64
64
65
65 # extracted from mercurial.context.basefilectx.annotate. slightly modified
66 # extracted from mercurial.context.basefilectx.annotate. slightly modified
66 # so it takes a fctx instead of a pair of text and fctx.
67 # so it takes a fctx instead of a pair of text and fctx.
67 def _decorate(fctx):
68 def _decorate(fctx):
68 text = fctx.data()
69 text = fctx.data()
69 linecount = text.count(b'\n')
70 linecount = text.count(b'\n')
70 if text and not text.endswith(b'\n'):
71 if text and not text.endswith(b'\n'):
71 linecount += 1
72 linecount += 1
72 return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
73 return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
73
74
74
75
75 # extracted from mercurial.context.basefilectx.annotate. slightly modified
76 # extracted from mercurial.context.basefilectx.annotate. slightly modified
76 # so it takes an extra "blocks" parameter calculated elsewhere, instead of
77 # so it takes an extra "blocks" parameter calculated elsewhere, instead of
77 # calculating diff here.
78 # calculating diff here.
78 def _pair(parent, child, blocks):
79 def _pair(parent, child, blocks):
79 for (a1, a2, b1, b2), t in blocks:
80 for (a1, a2, b1, b2), t in blocks:
80 # Changed blocks ('!') or blocks made only of blank lines ('~')
81 # Changed blocks ('!') or blocks made only of blank lines ('~')
81 # belong to the child.
82 # belong to the child.
82 if t == b'=':
83 if t == b'=':
83 child[0][b1:b2] = parent[0][a1:a2]
84 child[0][b1:b2] = parent[0][a1:a2]
84 return child
85 return child
85
86
86
87
87 # like scmutil.revsingle, but with lru cache, so their states (like manifests)
88 # like scmutil.revsingle, but with lru cache, so their states (like manifests)
88 # could be reused
89 # could be reused
89 _revsingle = util.lrucachefunc(scmutil.revsingle)
90 _revsingle = util.lrucachefunc(scmutil.revsingle)
90
91
91
92
92 def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
93 def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
93 """(repo, str, str) -> fctx
94 """(repo, str, str) -> fctx
94
95
95 get the filectx object from repo, rev, path, in an efficient way.
96 get the filectx object from repo, rev, path, in an efficient way.
96
97
97 if resolverev is True, "rev" is a revision specified by the revset
98 if resolverev is True, "rev" is a revision specified by the revset
98 language, otherwise "rev" is a nodeid, or a revision number that can
99 language, otherwise "rev" is a nodeid, or a revision number that can
99 be consumed by repo.__getitem__.
100 be consumed by repo.__getitem__.
100
101
101 if adjustctx is not None, the returned fctx will point to a changeset
102 if adjustctx is not None, the returned fctx will point to a changeset
102 that introduces the change (last modified the file). if adjustctx
103 that introduces the change (last modified the file). if adjustctx
103 is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
104 is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
104 faster for big repos but is incorrect for some cases.
105 faster for big repos but is incorrect for some cases.
105 """
106 """
106 if resolverev and not isinstance(rev, int) and rev is not None:
107 if resolverev and not isinstance(rev, int) and rev is not None:
107 ctx = _revsingle(repo, rev)
108 ctx = _revsingle(repo, rev)
108 else:
109 else:
109 ctx = repo[rev]
110 ctx = repo[rev]
110
111
111 # If we don't need to adjust the linkrev, create the filectx using the
112 # If we don't need to adjust the linkrev, create the filectx using the
112 # changectx instead of using ctx[path]. This means it already has the
113 # changectx instead of using ctx[path]. This means it already has the
113 # changectx information, so blame -u will be able to look directly at the
114 # changectx information, so blame -u will be able to look directly at the
114 # commitctx object instead of having to resolve it by going through the
115 # commitctx object instead of having to resolve it by going through the
115 # manifest. In a lazy-manifest world this can prevent us from downloading a
116 # manifest. In a lazy-manifest world this can prevent us from downloading a
116 # lot of data.
117 # lot of data.
117 if adjustctx is None:
118 if adjustctx is None:
118 # ctx.rev() is None means it's the working copy, which is a special
119 # ctx.rev() is None means it's the working copy, which is a special
119 # case.
120 # case.
120 if ctx.rev() is None:
121 if ctx.rev() is None:
121 fctx = ctx[path]
122 fctx = ctx[path]
122 else:
123 else:
123 fctx = repo.filectx(path, changeid=ctx.rev())
124 fctx = repo.filectx(path, changeid=ctx.rev())
124 else:
125 else:
125 fctx = ctx[path]
126 fctx = ctx[path]
126 if adjustctx == b'linkrev':
127 if adjustctx == b'linkrev':
127 introrev = fctx.linkrev()
128 introrev = fctx.linkrev()
128 else:
129 else:
129 introrev = fctx.introrev()
130 introrev = fctx.introrev()
130 if introrev != ctx.rev():
131 if introrev != ctx.rev():
131 fctx._changeid = introrev
132 fctx._changeid = introrev
132 fctx._changectx = repo[introrev]
133 fctx._changectx = repo[introrev]
133 return fctx
134 return fctx
134
135
135
136
136 # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
137 # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
137 def encodedir(path):
138 def encodedir(path):
138 return (
139 return (
139 path.replace(b'.hg/', b'.hg.hg/')
140 path.replace(b'.hg/', b'.hg.hg/')
140 .replace(b'.l/', b'.l.hg/')
141 .replace(b'.l/', b'.l.hg/')
141 .replace(b'.m/', b'.m.hg/')
142 .replace(b'.m/', b'.m.hg/')
142 .replace(b'.lock/', b'.lock.hg/')
143 .replace(b'.lock/', b'.lock.hg/')
143 )
144 )
144
145
145
146
146 def hashdiffopts(diffopts):
147 def hashdiffopts(diffopts):
147 diffoptstr = stringutil.pprint(
148 diffoptstr = stringutil.pprint(
148 sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults)
149 sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults)
149 )
150 )
150 return node.hex(hashlib.sha1(diffoptstr).digest())[:6]
151 return node.hex(hashlib.sha1(diffoptstr).digest())[:6]
151
152
152
153
153 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
154 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
154
155
155
156
156 class annotateopts(object):
157 class annotateopts(object):
157 """like mercurial.mdiff.diffopts, but is for annotate
158 """like mercurial.mdiff.diffopts, but is for annotate
158
159
159 followrename: follow renames, like "hg annotate -f"
160 followrename: follow renames, like "hg annotate -f"
160 followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
161 followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
161 """
162 """
162
163
163 defaults = {
164 defaults = {
164 b'diffopts': None,
165 b'diffopts': None,
165 b'followrename': True,
166 b'followrename': True,
166 b'followmerge': True,
167 b'followmerge': True,
167 }
168 }
168
169
169 def __init__(self, **opts):
170 def __init__(self, **opts):
170 opts = pycompat.byteskwargs(opts)
171 opts = pycompat.byteskwargs(opts)
171 for k, v in self.defaults.iteritems():
172 for k, v in self.defaults.iteritems():
172 setattr(self, k, opts.get(k, v))
173 setattr(self, k, opts.get(k, v))
173
174
174 @util.propertycache
175 @util.propertycache
175 def shortstr(self):
176 def shortstr(self):
176 """represent opts in a short string, suitable for a directory name"""
177 """represent opts in a short string, suitable for a directory name"""
177 result = b''
178 result = b''
178 if not self.followrename:
179 if not self.followrename:
179 result += b'r0'
180 result += b'r0'
180 if not self.followmerge:
181 if not self.followmerge:
181 result += b'm0'
182 result += b'm0'
182 if self.diffopts is not None:
183 if self.diffopts is not None:
183 assert isinstance(self.diffopts, mdiff.diffopts)
184 assert isinstance(self.diffopts, mdiff.diffopts)
184 diffopthash = hashdiffopts(self.diffopts)
185 diffopthash = hashdiffopts(self.diffopts)
185 if diffopthash != _defaultdiffopthash:
186 if diffopthash != _defaultdiffopthash:
186 result += b'i' + diffopthash
187 result += b'i' + diffopthash
187 return result or b'default'
188 return result or b'default'
188
189
189
190
190 defaultopts = annotateopts()
191 defaultopts = annotateopts()
191
192
192
193
193 class _annotatecontext(object):
194 class _annotatecontext(object):
194 """do not use this class directly as it does not use lock to protect
195 """do not use this class directly as it does not use lock to protect
195 writes. use "with annotatecontext(...)" instead.
196 writes. use "with annotatecontext(...)" instead.
196 """
197 """
197
198
198 def __init__(self, repo, path, linelogpath, revmappath, opts):
199 def __init__(self, repo, path, linelogpath, revmappath, opts):
199 self.repo = repo
200 self.repo = repo
200 self.ui = repo.ui
201 self.ui = repo.ui
201 self.path = path
202 self.path = path
202 self.opts = opts
203 self.opts = opts
203 self.linelogpath = linelogpath
204 self.linelogpath = linelogpath
204 self.revmappath = revmappath
205 self.revmappath = revmappath
205 self._linelog = None
206 self._linelog = None
206 self._revmap = None
207 self._revmap = None
207 self._node2path = {} # {str: str}
208 self._node2path = {} # {str: str}
208
209
209 @property
210 @property
210 def linelog(self):
211 def linelog(self):
211 if self._linelog is None:
212 if self._linelog is None:
212 if os.path.exists(self.linelogpath):
213 if os.path.exists(self.linelogpath):
213 with open(self.linelogpath, b'rb') as f:
214 with open(self.linelogpath, b'rb') as f:
214 try:
215 try:
215 self._linelog = linelogmod.linelog.fromdata(f.read())
216 self._linelog = linelogmod.linelog.fromdata(f.read())
216 except linelogmod.LineLogError:
217 except linelogmod.LineLogError:
217 self._linelog = linelogmod.linelog()
218 self._linelog = linelogmod.linelog()
218 else:
219 else:
219 self._linelog = linelogmod.linelog()
220 self._linelog = linelogmod.linelog()
220 return self._linelog
221 return self._linelog
221
222
222 @property
223 @property
223 def revmap(self):
224 def revmap(self):
224 if self._revmap is None:
225 if self._revmap is None:
225 self._revmap = revmapmod.revmap(self.revmappath)
226 self._revmap = revmapmod.revmap(self.revmappath)
226 return self._revmap
227 return self._revmap
227
228
228 def close(self):
229 def close(self):
229 if self._revmap is not None:
230 if self._revmap is not None:
230 self._revmap.flush()
231 self._revmap.flush()
231 self._revmap = None
232 self._revmap = None
232 if self._linelog is not None:
233 if self._linelog is not None:
233 with open(self.linelogpath, b'wb') as f:
234 with open(self.linelogpath, b'wb') as f:
234 f.write(self._linelog.encode())
235 f.write(self._linelog.encode())
235 self._linelog = None
236 self._linelog = None
236
237
237 __del__ = close
238 __del__ = close
238
239
239 def rebuild(self):
240 def rebuild(self):
240 """delete linelog and revmap, useful for rebuilding"""
241 """delete linelog and revmap, useful for rebuilding"""
241 self.close()
242 self.close()
242 self._node2path.clear()
243 self._node2path.clear()
243 _unlinkpaths([self.revmappath, self.linelogpath])
244 _unlinkpaths([self.revmappath, self.linelogpath])
244
245
245 @property
246 @property
246 def lastnode(self):
247 def lastnode(self):
247 """return last node in revmap, or None if revmap is empty"""
248 """return last node in revmap, or None if revmap is empty"""
248 if self._revmap is None:
249 if self._revmap is None:
249 # fast path, read revmap without loading its full content
250 # fast path, read revmap without loading its full content
250 return revmapmod.getlastnode(self.revmappath)
251 return revmapmod.getlastnode(self.revmappath)
251 else:
252 else:
252 return self._revmap.rev2hsh(self._revmap.maxrev)
253 return self._revmap.rev2hsh(self._revmap.maxrev)
253
254
254 def isuptodate(self, master, strict=True):
255 def isuptodate(self, master, strict=True):
255 """return True if the revmap / linelog is up-to-date, or the file
256 """return True if the revmap / linelog is up-to-date, or the file
256 does not exist in the master revision. False otherwise.
257 does not exist in the master revision. False otherwise.
257
258
258 it tries to be fast and could return false negatives, because of the
259 it tries to be fast and could return false negatives, because of the
259 use of linkrev instead of introrev.
260 use of linkrev instead of introrev.
260
261
261 useful for both server and client to decide whether to update
262 useful for both server and client to decide whether to update
262 fastannotate cache or not.
263 fastannotate cache or not.
263
264
264 if strict is True, even if fctx exists in the revmap, but is not the
265 if strict is True, even if fctx exists in the revmap, but is not the
265 last node, isuptodate will return False. it's good for performance - no
266 last node, isuptodate will return False. it's good for performance - no
266 expensive check was done.
267 expensive check was done.
267
268
268 if strict is False, if fctx exists in the revmap, this function may
269 if strict is False, if fctx exists in the revmap, this function may
269 return True. this is useful for the client to skip downloading the
270 return True. this is useful for the client to skip downloading the
270 cache if the client's master is behind the server's.
271 cache if the client's master is behind the server's.
271 """
272 """
272 lastnode = self.lastnode
273 lastnode = self.lastnode
273 try:
274 try:
274 f = self._resolvefctx(master, resolverev=True)
275 f = self._resolvefctx(master, resolverev=True)
275 # choose linkrev instead of introrev as the check is meant to be
276 # choose linkrev instead of introrev as the check is meant to be
276 # *fast*.
277 # *fast*.
277 linknode = self.repo.changelog.node(f.linkrev())
278 linknode = self.repo.changelog.node(f.linkrev())
278 if not strict and lastnode and linknode != lastnode:
279 if not strict and lastnode and linknode != lastnode:
279 # check if f.node() is in the revmap. note: this loads the
280 # check if f.node() is in the revmap. note: this loads the
280 # revmap and can be slow.
281 # revmap and can be slow.
281 return self.revmap.hsh2rev(linknode) is not None
282 return self.revmap.hsh2rev(linknode) is not None
282 # avoid resolving old manifest, or slow adjustlinkrev to be fast,
283 # avoid resolving old manifest, or slow adjustlinkrev to be fast,
283 # false negatives are acceptable in this case.
284 # false negatives are acceptable in this case.
284 return linknode == lastnode
285 return linknode == lastnode
285 except LookupError:
286 except LookupError:
286 # master does not have the file, or the revmap is ahead
287 # master does not have the file, or the revmap is ahead
287 return True
288 return True
288
289
289 def annotate(self, rev, master=None, showpath=False, showlines=False):
290 def annotate(self, rev, master=None, showpath=False, showlines=False):
290 """incrementally update the cache so it includes revisions in the main
291 """incrementally update the cache so it includes revisions in the main
291 branch till 'master'. and run annotate on 'rev', which may or may not be
292 branch till 'master'. and run annotate on 'rev', which may or may not be
292 included in the main branch.
293 included in the main branch.
293
294
294 if master is None, do not update linelog.
295 if master is None, do not update linelog.
295
296
296 the first value returned is the annotate result, it is [(node, linenum)]
297 the first value returned is the annotate result, it is [(node, linenum)]
297 by default. [(node, linenum, path)] if showpath is True.
298 by default. [(node, linenum, path)] if showpath is True.
298
299
299 if showlines is True, a second value will be returned, it is a list of
300 if showlines is True, a second value will be returned, it is a list of
300 corresponding line contents.
301 corresponding line contents.
301 """
302 """
302
303
303 # the fast path test requires commit hash, convert rev number to hash,
304 # the fast path test requires commit hash, convert rev number to hash,
304 # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
305 # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
305 # command could give us a revision number even if the user passes a
306 # command could give us a revision number even if the user passes a
306 # commit hash.
307 # commit hash.
307 if isinstance(rev, int):
308 if isinstance(rev, int):
308 rev = node.hex(self.repo.changelog.node(rev))
309 rev = node.hex(self.repo.changelog.node(rev))
309
310
310 # fast path: if rev is in the main branch already
311 # fast path: if rev is in the main branch already
311 directly, revfctx = self.canannotatedirectly(rev)
312 directly, revfctx = self.canannotatedirectly(rev)
312 if directly:
313 if directly:
313 if self.ui.debugflag:
314 if self.ui.debugflag:
314 self.ui.debug(
315 self.ui.debug(
315 b'fastannotate: %s: using fast path '
316 b'fastannotate: %s: using fast path '
316 b'(resolved fctx: %s)\n'
317 b'(resolved fctx: %s)\n'
317 % (
318 % (
318 self.path,
319 self.path,
319 stringutil.pprint(util.safehasattr(revfctx, b'node')),
320 stringutil.pprint(util.safehasattr(revfctx, b'node')),
320 )
321 )
321 )
322 )
322 return self.annotatedirectly(revfctx, showpath, showlines)
323 return self.annotatedirectly(revfctx, showpath, showlines)
323
324
324 # resolve master
325 # resolve master
325 masterfctx = None
326 masterfctx = None
326 if master:
327 if master:
327 try:
328 try:
328 masterfctx = self._resolvefctx(
329 masterfctx = self._resolvefctx(
329 master, resolverev=True, adjustctx=True
330 master, resolverev=True, adjustctx=True
330 )
331 )
331 except LookupError: # master does not have the file
332 except LookupError: # master does not have the file
332 pass
333 pass
333 else:
334 else:
334 if masterfctx in self.revmap: # no need to update linelog
335 if masterfctx in self.revmap: # no need to update linelog
335 masterfctx = None
336 masterfctx = None
336
337
337 # ... - @ <- rev (can be an arbitrary changeset,
338 # ... - @ <- rev (can be an arbitrary changeset,
338 # / not necessarily a descendant
339 # / not necessarily a descendant
339 # master -> o of master)
340 # master -> o of master)
340 # |
341 # |
341 # a merge -> o 'o': new changesets in the main branch
342 # a merge -> o 'o': new changesets in the main branch
342 # |\ '#': revisions in the main branch that
343 # |\ '#': revisions in the main branch that
343 # o * exist in linelog / revmap
344 # o * exist in linelog / revmap
344 # | . '*': changesets in side branches, or
345 # | . '*': changesets in side branches, or
345 # last master -> # . descendants of master
346 # last master -> # . descendants of master
346 # | .
347 # | .
347 # # * joint: '#', and is a parent of a '*'
348 # # * joint: '#', and is a parent of a '*'
348 # |/
349 # |/
349 # a joint -> # ^^^^ --- side branches
350 # a joint -> # ^^^^ --- side branches
350 # |
351 # |
351 # ^ --- main branch (in linelog)
352 # ^ --- main branch (in linelog)
352
353
353 # these DFSes are similar to the traditional annotate algorithm.
354 # these DFSes are similar to the traditional annotate algorithm.
354 # we cannot really reuse the code for perf reason.
355 # we cannot really reuse the code for perf reason.
355
356
356 # 1st DFS calculates merges, joint points, and needed.
357 # 1st DFS calculates merges, joint points, and needed.
357 # "needed" is a simple reference counting dict to free items in
358 # "needed" is a simple reference counting dict to free items in
358 # "hist", reducing its memory usage otherwise could be huge.
359 # "hist", reducing its memory usage otherwise could be huge.
359 initvisit = [revfctx]
360 initvisit = [revfctx]
360 if masterfctx:
361 if masterfctx:
361 if masterfctx.rev() is None:
362 if masterfctx.rev() is None:
362 raise error.Abort(
363 raise error.Abort(
363 _(b'cannot update linelog to wdir()'),
364 _(b'cannot update linelog to wdir()'),
364 hint=_(b'set fastannotate.mainbranch'),
365 hint=_(b'set fastannotate.mainbranch'),
365 )
366 )
366 initvisit.append(masterfctx)
367 initvisit.append(masterfctx)
367 visit = initvisit[:]
368 visit = initvisit[:]
368 pcache = {}
369 pcache = {}
369 needed = {revfctx: 1}
370 needed = {revfctx: 1}
370 hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
371 hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
371 while visit:
372 while visit:
372 f = visit.pop()
373 f = visit.pop()
373 if f in pcache or f in hist:
374 if f in pcache or f in hist:
374 continue
375 continue
375 if f in self.revmap: # in the old main branch, it's a joint
376 if f in self.revmap: # in the old main branch, it's a joint
376 llrev = self.revmap.hsh2rev(f.node())
377 llrev = self.revmap.hsh2rev(f.node())
377 self.linelog.annotate(llrev)
378 self.linelog.annotate(llrev)
378 result = self.linelog.annotateresult
379 result = self.linelog.annotateresult
379 hist[f] = (result, f.data())
380 hist[f] = (result, f.data())
380 continue
381 continue
381 pl = self._parentfunc(f)
382 pl = self._parentfunc(f)
382 pcache[f] = pl
383 pcache[f] = pl
383 for p in pl:
384 for p in pl:
384 needed[p] = needed.get(p, 0) + 1
385 needed[p] = needed.get(p, 0) + 1
385 if p not in pcache:
386 if p not in pcache:
386 visit.append(p)
387 visit.append(p)
387
388
388 # 2nd (simple) DFS calculates new changesets in the main branch
389 # 2nd (simple) DFS calculates new changesets in the main branch
389 # ('o' nodes in # the above graph), so we know when to update linelog.
390 # ('o' nodes in # the above graph), so we know when to update linelog.
390 newmainbranch = set()
391 newmainbranch = set()
391 f = masterfctx
392 f = masterfctx
392 while f and f not in self.revmap:
393 while f and f not in self.revmap:
393 newmainbranch.add(f)
394 newmainbranch.add(f)
394 pl = pcache[f]
395 pl = pcache[f]
395 if pl:
396 if pl:
396 f = pl[0]
397 f = pl[0]
397 else:
398 else:
398 f = None
399 f = None
399 break
400 break
400
401
401 # f, if present, is the position where the last build stopped at, and
402 # f, if present, is the position where the last build stopped at, and
402 # should be the "master" last time. check to see if we can continue
403 # should be the "master" last time. check to see if we can continue
403 # building the linelog incrementally. (we cannot if diverged)
404 # building the linelog incrementally. (we cannot if diverged)
404 if masterfctx is not None:
405 if masterfctx is not None:
405 self._checklastmasterhead(f)
406 self._checklastmasterhead(f)
406
407
407 if self.ui.debugflag:
408 if self.ui.debugflag:
408 if newmainbranch:
409 if newmainbranch:
409 self.ui.debug(
410 self.ui.debug(
410 b'fastannotate: %s: %d new changesets in the main'
411 b'fastannotate: %s: %d new changesets in the main'
411 b' branch\n' % (self.path, len(newmainbranch))
412 b' branch\n' % (self.path, len(newmainbranch))
412 )
413 )
413 elif not hist: # no joints, no updates
414 elif not hist: # no joints, no updates
414 self.ui.debug(
415 self.ui.debug(
415 b'fastannotate: %s: linelog cannot help in '
416 b'fastannotate: %s: linelog cannot help in '
416 b'annotating this revision\n' % self.path
417 b'annotating this revision\n' % self.path
417 )
418 )
418
419
419 # prepare annotateresult so we can update linelog incrementally
420 # prepare annotateresult so we can update linelog incrementally
420 self.linelog.annotate(self.linelog.maxrev)
421 self.linelog.annotate(self.linelog.maxrev)
421
422
422 # 3rd DFS does the actual annotate
423 # 3rd DFS does the actual annotate
423 visit = initvisit[:]
424 visit = initvisit[:]
424 progress = self.ui.makeprogress(
425 progress = self.ui.makeprogress(
425 b'building cache', total=len(newmainbranch)
426 b'building cache', total=len(newmainbranch)
426 )
427 )
427 while visit:
428 while visit:
428 f = visit[-1]
429 f = visit[-1]
429 if f in hist:
430 if f in hist:
430 visit.pop()
431 visit.pop()
431 continue
432 continue
432
433
433 ready = True
434 ready = True
434 pl = pcache[f]
435 pl = pcache[f]
435 for p in pl:
436 for p in pl:
436 if p not in hist:
437 if p not in hist:
437 ready = False
438 ready = False
438 visit.append(p)
439 visit.append(p)
439 if not ready:
440 if not ready:
440 continue
441 continue
441
442
442 visit.pop()
443 visit.pop()
443 blocks = None # mdiff blocks, used for appending linelog
444 blocks = None # mdiff blocks, used for appending linelog
444 ismainbranch = f in newmainbranch
445 ismainbranch = f in newmainbranch
445 # curr is the same as the traditional annotate algorithm,
446 # curr is the same as the traditional annotate algorithm,
446 # if we only care about linear history (do not follow merge),
447 # if we only care about linear history (do not follow merge),
447 # then curr is not actually used.
448 # then curr is not actually used.
448 assert f not in hist
449 assert f not in hist
449 curr = _decorate(f)
450 curr = _decorate(f)
450 for i, p in enumerate(pl):
451 for i, p in enumerate(pl):
451 bs = list(self._diffblocks(hist[p][1], curr[1]))
452 bs = list(self._diffblocks(hist[p][1], curr[1]))
452 if i == 0 and ismainbranch:
453 if i == 0 and ismainbranch:
453 blocks = bs
454 blocks = bs
454 curr = _pair(hist[p], curr, bs)
455 curr = _pair(hist[p], curr, bs)
455 if needed[p] == 1:
456 if needed[p] == 1:
456 del hist[p]
457 del hist[p]
457 del needed[p]
458 del needed[p]
458 else:
459 else:
459 needed[p] -= 1
460 needed[p] -= 1
460
461
461 hist[f] = curr
462 hist[f] = curr
462 del pcache[f]
463 del pcache[f]
463
464
464 if ismainbranch: # need to write to linelog
465 if ismainbranch: # need to write to linelog
465 progress.increment()
466 progress.increment()
466 bannotated = None
467 bannotated = None
467 if len(pl) == 2 and self.opts.followmerge: # merge
468 if len(pl) == 2 and self.opts.followmerge: # merge
468 bannotated = curr[0]
469 bannotated = curr[0]
469 if blocks is None: # no parents, add an empty one
470 if blocks is None: # no parents, add an empty one
470 blocks = list(self._diffblocks(b'', curr[1]))
471 blocks = list(self._diffblocks(b'', curr[1]))
471 self._appendrev(f, blocks, bannotated)
472 self._appendrev(f, blocks, bannotated)
472 elif showpath: # not append linelog, but we need to record path
473 elif showpath: # not append linelog, but we need to record path
473 self._node2path[f.node()] = f.path()
474 self._node2path[f.node()] = f.path()
474
475
475 progress.complete()
476 progress.complete()
476
477
477 result = [
478 result = [
478 ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
479 ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
479 for fr, l in hist[revfctx][0]
480 for fr, l in hist[revfctx][0]
480 ] # [(node, linenumber)]
481 ] # [(node, linenumber)]
481 return self._refineannotateresult(result, revfctx, showpath, showlines)
482 return self._refineannotateresult(result, revfctx, showpath, showlines)
482
483
483 def canannotatedirectly(self, rev):
484 def canannotatedirectly(self, rev):
484 """(str) -> bool, fctx or node.
485 """(str) -> bool, fctx or node.
485 return (True, f) if we can annotate without updating the linelog, pass
486 return (True, f) if we can annotate without updating the linelog, pass
486 f to annotatedirectly.
487 f to annotatedirectly.
487 return (False, f) if we need extra calculation. f is the fctx resolved
488 return (False, f) if we need extra calculation. f is the fctx resolved
488 from rev.
489 from rev.
489 """
490 """
490 result = True
491 result = True
491 f = None
492 f = None
492 if not isinstance(rev, int) and rev is not None:
493 if not isinstance(rev, int) and rev is not None:
493 hsh = {20: bytes, 40: node.bin}.get(len(rev), lambda x: None)(rev)
494 hsh = {20: bytes, 40: node.bin}.get(len(rev), lambda x: None)(rev)
494 if hsh is not None and (hsh, self.path) in self.revmap:
495 if hsh is not None and (hsh, self.path) in self.revmap:
495 f = hsh
496 f = hsh
496 if f is None:
497 if f is None:
497 adjustctx = b'linkrev' if self._perfhack else True
498 adjustctx = b'linkrev' if self._perfhack else True
498 f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
499 f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
499 result = f in self.revmap
500 result = f in self.revmap
500 if not result and self._perfhack:
501 if not result and self._perfhack:
501 # redo the resolution without perfhack - as we are going to
502 # redo the resolution without perfhack - as we are going to
502 # do write operations, we need a correct fctx.
503 # do write operations, we need a correct fctx.
503 f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
504 f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
504 return result, f
505 return result, f
505
506
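# Editor's note: a minimal usage sketch (hypothetical caller, not part of
# this diff) showing how the (bool, fctx-or-node) pair returned above
# steers callers between the fast and slow paths:
#
#     candirect, f = actx.canannotatedirectly(rev)
#     if candirect:
#         result = actx.annotatedirectly(f, showpath=False, showlines=False)
#     else:
#         result = actx.annotate(rev)  # may rebuild the linelog cache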
506 def annotatealllines(self, rev, showpath=False, showlines=False):
507 def annotatealllines(self, rev, showpath=False, showlines=False):
507 """(rev : str) -> [(node : str, linenum : int, path : str)]
508 """(rev : str) -> [(node : str, linenum : int, path : str)]
508
509
509 the result has the same format as annotate, but includes all (including
510 the result has the same format as annotate, but includes all (including
510 deleted) lines up to rev. call this after calling annotate(rev, ...) for
511 deleted) lines up to rev. call this after calling annotate(rev, ...) for
511 better performance and accuracy.
512 better performance and accuracy.
512 """
513 """
513 revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
514 revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
514
515
515 # find a chain from rev to anything in the mainbranch
516 # find a chain from rev to anything in the mainbranch
516 if revfctx not in self.revmap:
517 if revfctx not in self.revmap:
517 chain = [revfctx]
518 chain = [revfctx]
518 a = b''
519 a = b''
519 while True:
520 while True:
520 f = chain[-1]
521 f = chain[-1]
521 pl = self._parentfunc(f)
522 pl = self._parentfunc(f)
522 if not pl:
523 if not pl:
523 break
524 break
524 if pl[0] in self.revmap:
525 if pl[0] in self.revmap:
525 a = pl[0].data()
526 a = pl[0].data()
526 break
527 break
527 chain.append(pl[0])
528 chain.append(pl[0])
528
529
529 # both self.linelog and self.revmap are backed by the filesystem. now
530 # both self.linelog and self.revmap are backed by the filesystem. now
530 # we want to modify them but do not want to write changes back to
531 # we want to modify them but do not want to write changes back to
531 # files. so we create in-memory objects and copy them. it's like
532 # files. so we create in-memory objects and copy them. it's like
532 # a "fork".
533 # a "fork".
533 linelog = linelogmod.linelog()
534 linelog = linelogmod.linelog()
534 linelog.copyfrom(self.linelog)
535 linelog.copyfrom(self.linelog)
535 linelog.annotate(linelog.maxrev)
536 linelog.annotate(linelog.maxrev)
536 revmap = revmapmod.revmap()
537 revmap = revmapmod.revmap()
537 revmap.copyfrom(self.revmap)
538 revmap.copyfrom(self.revmap)
538
539
539 for f in reversed(chain):
540 for f in reversed(chain):
540 b = f.data()
541 b = f.data()
541 blocks = list(self._diffblocks(a, b))
542 blocks = list(self._diffblocks(a, b))
542 self._doappendrev(linelog, revmap, f, blocks)
543 self._doappendrev(linelog, revmap, f, blocks)
543 a = b
544 a = b
544 else:
545 else:
545 # fastpath: use existing linelog, revmap as we don't write to them
546 # fastpath: use existing linelog, revmap as we don't write to them
546 linelog = self.linelog
547 linelog = self.linelog
547 revmap = self.revmap
548 revmap = self.revmap
548
549
549 lines = linelog.getalllines()
550 lines = linelog.getalllines()
550 hsh = revfctx.node()
551 hsh = revfctx.node()
551 llrev = revmap.hsh2rev(hsh)
552 llrev = revmap.hsh2rev(hsh)
552 result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
553 result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
553 # cannot use _refineannotateresult since we need custom logic for
554 # cannot use _refineannotateresult since we need custom logic for
554 # resolving line contents
555 # resolving line contents
555 if showpath:
556 if showpath:
556 result = self._addpathtoresult(result, revmap)
557 result = self._addpathtoresult(result, revmap)
557 if showlines:
558 if showlines:
558 linecontents = self._resolvelines(result, revmap, linelog)
559 linecontents = self._resolvelines(result, revmap, linelog)
559 result = (result, linecontents)
560 result = (result, linecontents)
560 return result
561 return result
561
562
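# Editor's note: an assumed usage order, per the docstring above -- calling
# annotate() first lets annotatealllines() take the fast path that reuses
# the on-disk linelog instead of forking it in memory:
#
#     actx.annotate(rev, master=rev)             # populates linelog/revmap
#     all_lines = actx.annotatealllines(rev, showpath=True)
#     # -> [(node, linenum, path)], deleted lines included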
562 def _resolvelines(self, annotateresult, revmap, linelog):
563 def _resolvelines(self, annotateresult, revmap, linelog):
563 """(annotateresult) -> [line]. designed for annotatealllines.
564 """(annotateresult) -> [line]. designed for annotatealllines.
564 this is probably the most inefficient code in the whole fastannotate
565 this is probably the most inefficient code in the whole fastannotate
565 directory. but we have made a decision that the linelog does not
566 directory. but we have made a decision that the linelog does not
566 store line contents. so getting them requires random accesses to
567 store line contents. so getting them requires random accesses to
567 the revlog data; since there can be many of them, this can be very slow.
568 the revlog data; since there can be many of them, this can be very slow.
568 """
569 """
569 # [llrev]
570 # [llrev]
570 revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
571 revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
571 result = [None] * len(annotateresult)
572 result = [None] * len(annotateresult)
572 # {(rev, linenum): [lineindex]}
573 # {(rev, linenum): [lineindex]}
573 key2idxs = collections.defaultdict(list)
574 key2idxs = collections.defaultdict(list)
574 for i in pycompat.xrange(len(result)):
575 for i in pycompat.xrange(len(result)):
575 key2idxs[(revs[i], annotateresult[i][1])].append(i)
576 key2idxs[(revs[i], annotateresult[i][1])].append(i)
576 while key2idxs:
577 while key2idxs:
577 # find an unresolved line and its linelog rev to annotate
578 # find an unresolved line and its linelog rev to annotate
578 hsh = None
579 hsh = None
579 try:
580 try:
580 for (rev, _linenum), idxs in key2idxs.iteritems():
581 for (rev, _linenum), idxs in key2idxs.iteritems():
581 if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
582 if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
582 continue
583 continue
583 hsh = annotateresult[idxs[0]][0]
584 hsh = annotateresult[idxs[0]][0]
584 break
585 break
585 except StopIteration: # no more unresolved lines
586 except StopIteration: # no more unresolved lines
586 return result
587 return result
587 if hsh is None:
588 if hsh is None:
588 # the remaining key2idxs are not in main branch, resolving them
589 # the remaining key2idxs are not in main branch, resolving them
589 # using the hard way...
590 # using the hard way...
590 revlines = {}
591 revlines = {}
591 for (rev, linenum), idxs in key2idxs.iteritems():
592 for (rev, linenum), idxs in key2idxs.iteritems():
592 if rev not in revlines:
593 if rev not in revlines:
593 hsh = annotateresult[idxs[0]][0]
594 hsh = annotateresult[idxs[0]][0]
594 if self.ui.debugflag:
595 if self.ui.debugflag:
595 self.ui.debug(
596 self.ui.debug(
596 b'fastannotate: reading %s line #%d '
597 b'fastannotate: reading %s line #%d '
597 b'to resolve lines %r\n'
598 b'to resolve lines %r\n'
598 % (node.short(hsh), linenum, idxs)
599 % (node.short(hsh), linenum, idxs)
599 )
600 )
600 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
601 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
601 lines = mdiff.splitnewlines(fctx.data())
602 lines = mdiff.splitnewlines(fctx.data())
602 revlines[rev] = lines
603 revlines[rev] = lines
603 for idx in idxs:
604 for idx in idxs:
604 result[idx] = revlines[rev][linenum]
605 result[idx] = revlines[rev][linenum]
605 assert all(x is not None for x in result)
606 assert all(x is not None for x in result)
606 return result
607 return result
607
608
608 # run the annotate and the lines should match the file content
609 # run the annotate and the lines should match the file content
609 self.ui.debug(
610 self.ui.debug(
610 b'fastannotate: annotate %s to resolve lines\n'
611 b'fastannotate: annotate %s to resolve lines\n'
611 % node.short(hsh)
612 % node.short(hsh)
612 )
613 )
613 linelog.annotate(rev)
614 linelog.annotate(rev)
614 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
615 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
615 annotated = linelog.annotateresult
616 annotated = linelog.annotateresult
616 lines = mdiff.splitnewlines(fctx.data())
617 lines = mdiff.splitnewlines(fctx.data())
617 if len(lines) != len(annotated):
618 if len(lines) != len(annotated):
618 raise faerror.CorruptedFileError(b'unexpected annotated lines')
619 raise faerror.CorruptedFileError(b'unexpected annotated lines')
619 # resolve lines from the annotate result
620 # resolve lines from the annotate result
620 for i, line in enumerate(lines):
621 for i, line in enumerate(lines):
621 k = annotated[i]
622 k = annotated[i]
622 if k in key2idxs:
623 if k in key2idxs:
623 for idx in key2idxs[k]:
624 for idx in key2idxs[k]:
624 result[idx] = line
625 result[idx] = line
625 del key2idxs[k]
626 del key2idxs[k]
626 return result
627 return result
627
628
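# Editor's illustration of the key2idxs bookkeeping above, with made-up
# values: duplicate (rev, linenum) keys are resolved together, one
# linelog.annotate() pass per revision rather than once per output line.
#
#     annotateresult = [(hsh1, 0), (hsh2, 3), (hsh1, 0)]
#     revs = [1, 2, 1]                      # via revmap.hsh2rev
#     key2idxs = {(1, 0): [0, 2], (2, 3): [1]}
#     # annotating llrev 1 fills result[0] and result[2] in one pass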
628 def annotatedirectly(self, f, showpath, showlines):
629 def annotatedirectly(self, f, showpath, showlines):
629 """like annotate, but when we know that f is in linelog.
630 """like annotate, but when we know that f is in linelog.
630 f can be either a 20-char str (node) or a fctx. this is for perf - in
631 f can be either a 20-char str (node) or a fctx. this is for perf - in
631 the best case, the user provides a node and we don't need to read the
632 the best case, the user provides a node and we don't need to read the
632 filelog or construct any filecontext.
633 filelog or construct any filecontext.
633 """
634 """
634 if isinstance(f, bytes):
635 if isinstance(f, bytes):
635 hsh = f
636 hsh = f
636 else:
637 else:
637 hsh = f.node()
638 hsh = f.node()
638 llrev = self.revmap.hsh2rev(hsh)
639 llrev = self.revmap.hsh2rev(hsh)
639 if not llrev:
640 if not llrev:
640 raise faerror.CorruptedFileError(
641 raise faerror.CorruptedFileError(
641 b'%s is not in revmap' % node.hex(hsh)
642 b'%s is not in revmap' % node.hex(hsh)
642 )
643 )
643 if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
644 if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
644 raise faerror.CorruptedFileError(
645 raise faerror.CorruptedFileError(
645 b'%s is not in revmap mainbranch' % node.hex(hsh)
646 b'%s is not in revmap mainbranch' % node.hex(hsh)
646 )
647 )
647 self.linelog.annotate(llrev)
648 self.linelog.annotate(llrev)
648 result = [
649 result = [
649 (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult
650 (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult
650 ]
651 ]
651 return self._refineannotateresult(result, f, showpath, showlines)
652 return self._refineannotateresult(result, f, showpath, showlines)
652
653
653 def _refineannotateresult(self, result, f, showpath, showlines):
654 def _refineannotateresult(self, result, f, showpath, showlines):
654 """add the missing path or line contents, they can be expensive.
655 """add the missing path or line contents, they can be expensive.
655 f could be either node or fctx.
656 f could be either node or fctx.
656 """
657 """
657 if showpath:
658 if showpath:
658 result = self._addpathtoresult(result)
659 result = self._addpathtoresult(result)
659 if showlines:
660 if showlines:
660 if isinstance(f, bytes): # f: node or fctx
661 if isinstance(f, bytes): # f: node or fctx
661 llrev = self.revmap.hsh2rev(f)
662 llrev = self.revmap.hsh2rev(f)
662 fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
663 fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
663 else:
664 else:
664 fctx = f
665 fctx = f
665 lines = mdiff.splitnewlines(fctx.data())
666 lines = mdiff.splitnewlines(fctx.data())
666 if len(lines) != len(result): # linelog is probably corrupted
667 if len(lines) != len(result): # linelog is probably corrupted
667 raise faerror.CorruptedFileError()
668 raise faerror.CorruptedFileError()
668 result = (result, lines)
669 result = (result, lines)
669 return result
670 return result
670
671
671 def _appendrev(self, fctx, blocks, bannotated=None):
672 def _appendrev(self, fctx, blocks, bannotated=None):
672 self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
673 self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
673
674
674 def _diffblocks(self, a, b):
675 def _diffblocks(self, a, b):
675 return mdiff.allblocks(a, b, self.opts.diffopts)
676 return mdiff.allblocks(a, b, self.opts.diffopts)
676
677
677 @staticmethod
678 @staticmethod
678 def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
679 def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
679 """append a revision to linelog and revmap"""
680 """append a revision to linelog and revmap"""
680
681
681 def getllrev(f):
682 def getllrev(f):
682 """(fctx) -> int"""
683 """(fctx) -> int"""
683 # f should not be a linelog revision
684 # f should not be a linelog revision
684 if isinstance(f, int):
685 if isinstance(f, int):
685 raise error.ProgrammingError(b'f should not be an int')
686 raise error.ProgrammingError(b'f should not be an int')
686 # f is a fctx, allocate linelog rev on demand
687 # f is a fctx, allocate linelog rev on demand
687 hsh = f.node()
688 hsh = f.node()
688 rev = revmap.hsh2rev(hsh)
689 rev = revmap.hsh2rev(hsh)
689 if rev is None:
690 if rev is None:
690 rev = revmap.append(hsh, sidebranch=True, path=f.path())
691 rev = revmap.append(hsh, sidebranch=True, path=f.path())
691 return rev
692 return rev
692
693
693 # append sidebranch revisions to revmap
694 # append sidebranch revisions to revmap
694 siderevs = []
695 siderevs = []
695 siderevmap = {} # node: int
696 siderevmap = {} # node: int
696 if bannotated is not None:
697 if bannotated is not None:
697 for (a1, a2, b1, b2), op in blocks:
698 for (a1, a2, b1, b2), op in blocks:
698 if op != b'=':
699 if op != b'=':
699 # f could be either a linelog rev or an fctx.
700 # f could be either a linelog rev or an fctx.
700 siderevs += [
701 siderevs += [
701 f
702 f
702 for f, l in bannotated[b1:b2]
703 for f, l in bannotated[b1:b2]
703 if not isinstance(f, int)
704 if not isinstance(f, int)
704 ]
705 ]
705 siderevs = set(siderevs)
706 siderevs = set(siderevs)
706 if fctx in siderevs: # mainnode must be appended separately
707 if fctx in siderevs: # mainnode must be appended separately
707 siderevs.remove(fctx)
708 siderevs.remove(fctx)
708 for f in siderevs:
709 for f in siderevs:
709 siderevmap[f] = getllrev(f)
710 siderevmap[f] = getllrev(f)
710
711
711 # the changeset in the main branch, could be a merge
712 # the changeset in the main branch, could be a merge
712 llrev = revmap.append(fctx.node(), path=fctx.path())
713 llrev = revmap.append(fctx.node(), path=fctx.path())
713 siderevmap[fctx] = llrev
714 siderevmap[fctx] = llrev
714
715
715 for (a1, a2, b1, b2), op in reversed(blocks):
716 for (a1, a2, b1, b2), op in reversed(blocks):
716 if op == b'=':
717 if op == b'=':
717 continue
718 continue
718 if bannotated is None:
719 if bannotated is None:
719 linelog.replacelines(llrev, a1, a2, b1, b2)
720 linelog.replacelines(llrev, a1, a2, b1, b2)
720 else:
721 else:
721 blines = [
722 blines = [
722 ((r if isinstance(r, int) else siderevmap[r]), l)
723 ((r if isinstance(r, int) else siderevmap[r]), l)
723 for r, l in bannotated[b1:b2]
724 for r, l in bannotated[b1:b2]
724 ]
725 ]
725 linelog.replacelines_vec(llrev, a1, a2, blines)
726 linelog.replacelines_vec(llrev, a1, a2, blines)
726
727
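# Editor's illustration (assumed values, not from this diff): mdiff.allblocks
# yields ((a1, a2, b1, b2), op) tuples, and only op != b'=' ranges touch the
# linelog. Replacing old lines 2..4 with new lines 2..3 would look like:
#
#     blocks = [((0, 2, 0, 2), b'='), ((2, 4, 2, 3), b'!')]
#     # iterating reversed(blocks) keeps earlier offsets valid while
#     # later ranges mutate, ending in:
#     linelog.replacelines(llrev, 2, 4, 2, 3)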
727 def _addpathtoresult(self, annotateresult, revmap=None):
728 def _addpathtoresult(self, annotateresult, revmap=None):
728 """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
729 """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
729 if revmap is None:
730 if revmap is None:
730 revmap = self.revmap
731 revmap = self.revmap
731
732
732 def _getpath(nodeid):
733 def _getpath(nodeid):
733 path = self._node2path.get(nodeid)
734 path = self._node2path.get(nodeid)
734 if path is None:
735 if path is None:
735 path = revmap.rev2path(revmap.hsh2rev(nodeid))
736 path = revmap.rev2path(revmap.hsh2rev(nodeid))
736 self._node2path[nodeid] = path
737 self._node2path[nodeid] = path
737 return path
738 return path
738
739
739 return [(n, l, _getpath(n)) for n, l in annotateresult]
740 return [(n, l, _getpath(n)) for n, l in annotateresult]
740
741
741 def _checklastmasterhead(self, fctx):
742 def _checklastmasterhead(self, fctx):
742 """check if fctx is the master's head last time, raise if not"""
743 """check if fctx is the master's head last time, raise if not"""
743 if fctx is None:
744 if fctx is None:
744 llrev = 0
745 llrev = 0
745 else:
746 else:
746 llrev = self.revmap.hsh2rev(fctx.node())
747 llrev = self.revmap.hsh2rev(fctx.node())
747 if not llrev:
748 if not llrev:
748 raise faerror.CannotReuseError()
749 raise faerror.CannotReuseError()
749 if self.linelog.maxrev != llrev:
750 if self.linelog.maxrev != llrev:
750 raise faerror.CannotReuseError()
751 raise faerror.CannotReuseError()
751
752
752 @util.propertycache
753 @util.propertycache
753 def _parentfunc(self):
754 def _parentfunc(self):
754 """-> (fctx) -> [fctx]"""
755 """-> (fctx) -> [fctx]"""
755 followrename = self.opts.followrename
756 followrename = self.opts.followrename
756 followmerge = self.opts.followmerge
757 followmerge = self.opts.followmerge
757
758
758 def parents(f):
759 def parents(f):
759 pl = _parents(f, follow=followrename)
760 pl = _parents(f, follow=followrename)
760 if not followmerge:
761 if not followmerge:
761 pl = pl[:1]
762 pl = pl[:1]
762 return pl
763 return pl
763
764
764 return parents
765 return parents
765
766
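# Editor's note: a tiny sketch of the truncation above -- with followmerge
# off, a merge's second parent is simply dropped so the annotated history
# stays linear for the linelog:
#
#     pl = [p1, p2]   # parents of a merge fctx
#     pl[:1]          # -> [p1]; p2's lines show up as newly introduced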
766 @util.propertycache
767 @util.propertycache
767 def _perfhack(self):
768 def _perfhack(self):
768 return self.ui.configbool(b'fastannotate', b'perfhack')
769 return self.ui.configbool(b'fastannotate', b'perfhack')
769
770
770 def _resolvefctx(self, rev, path=None, **kwds):
771 def _resolvefctx(self, rev, path=None, **kwds):
771 return resolvefctx(self.repo, rev, (path or self.path), **kwds)
772 return resolvefctx(self.repo, rev, (path or self.path), **kwds)
772
773
773
774
774 def _unlinkpaths(paths):
775 def _unlinkpaths(paths):
775 """silent, best-effort unlink"""
776 """silent, best-effort unlink"""
776 for path in paths:
777 for path in paths:
777 try:
778 try:
778 util.unlink(path)
779 util.unlink(path)
779 except OSError:
780 except OSError:
780 pass
781 pass
781
782
782
783
783 class pathhelper(object):
784 class pathhelper(object):
784 """helper for getting paths for lockfile, linelog and revmap"""
785 """helper for getting paths for lockfile, linelog and revmap"""
785
786
786 def __init__(self, repo, path, opts=defaultopts):
787 def __init__(self, repo, path, opts=defaultopts):
787 # different options use different directories
788 # different options use different directories
788 self._vfspath = os.path.join(
789 self._vfspath = os.path.join(
789 b'fastannotate', opts.shortstr, encodedir(path)
790 b'fastannotate', opts.shortstr, encodedir(path)
790 )
791 )
791 self._repo = repo
792 self._repo = repo
792
793
793 @property
794 @property
794 def dirname(self):
795 def dirname(self):
795 return os.path.dirname(self._repo.vfs.join(self._vfspath))
796 return os.path.dirname(self._repo.vfs.join(self._vfspath))
796
797
797 @property
798 @property
798 def linelogpath(self):
799 def linelogpath(self):
799 return self._repo.vfs.join(self._vfspath + b'.l')
800 return self._repo.vfs.join(self._vfspath + b'.l')
800
801
801 def lock(self):
802 def lock(self):
802 return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock')
803 return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock')
803
804
804 @property
805 @property
805 def revmappath(self):
806 def revmappath(self):
806 return self._repo.vfs.join(self._vfspath + b'.m')
807 return self._repo.vfs.join(self._vfspath + b'.m')
807
808
808
809
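# Editor's note: for a tracked file "dir/a.py" the helper above yields
# cache paths like these under .hg ("default" is an assumed shortstr for
# the default options, shown for illustration only):
#
#     .hg/fastannotate/default/dir/a.py.l     # linelog
#     .hg/fastannotate/default/dir/a.py.m     # revmap
#     .hg/fastannotate/default/dir/a.py.lock  # lock file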
809 @contextlib.contextmanager
810 @contextlib.contextmanager
810 def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
811 def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
811 """context needed to perform (fast) annotate on a file
812 """context needed to perform (fast) annotate on a file
812
813
813 an annotatecontext of a single file consists of two structures: the
814 an annotatecontext of a single file consists of two structures: the
814 linelog and the revmap. this function takes care of locking. only 1
815 linelog and the revmap. this function takes care of locking. only 1
815 process is allowed to write that file's linelog and revmap at a time.
816 process is allowed to write that file's linelog and revmap at a time.
816
817
817 when something goes wrong, this function will assume the linelog and the
818 when something goes wrong, this function will assume the linelog and the
818 revmap are in a bad state, and remove them from disk.
819 revmap are in a bad state, and remove them from disk.
819
820
820 use this function in the following way:
821 use this function in the following way:
821
822
822 with annotatecontext(...) as actx:
823 with annotatecontext(...) as actx:
823 actx. ....
824 actx. ....
824 """
825 """
825 helper = pathhelper(repo, path, opts)
826 helper = pathhelper(repo, path, opts)
826 util.makedirs(helper.dirname)
827 util.makedirs(helper.dirname)
827 revmappath = helper.revmappath
828 revmappath = helper.revmappath
828 linelogpath = helper.linelogpath
829 linelogpath = helper.linelogpath
829 actx = None
830 actx = None
830 try:
831 try:
831 with helper.lock():
832 with helper.lock():
832 actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
833 actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
833 if rebuild:
834 if rebuild:
834 actx.rebuild()
835 actx.rebuild()
835 yield actx
836 yield actx
836 except Exception:
837 except Exception:
837 if actx is not None:
838 if actx is not None:
838 actx.rebuild()
839 actx.rebuild()
839 repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
840 repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
840 raise
841 raise
841 finally:
842 finally:
842 if actx is not None:
843 if actx is not None:
843 actx.close()
844 actx.close()
844
845
845
846
846 def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
847 def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
847 """like annotatecontext but get the context from a fctx. convenient when
848 """like annotatecontext but get the context from a fctx. convenient when
848 used in fctx.annotate
849 used in fctx.annotate
849 """
850 """
850 repo = fctx._repo
851 repo = fctx._repo
851 path = fctx._path
852 path = fctx._path
852 if repo.ui.configbool(b'fastannotate', b'forcefollow', True):
853 if repo.ui.configbool(b'fastannotate', b'forcefollow', True):
853 follow = True
854 follow = True
854 aopts = annotateopts(diffopts=diffopts, followrename=follow)
855 aopts = annotateopts(diffopts=diffopts, followrename=follow)
855 return annotatecontext(repo, path, aopts, rebuild)
856 return annotatecontext(repo, path, aopts, rebuild)
@@ -1,137 +1,138
1 # Copyright 2016-present Facebook. All Rights Reserved.
1 # Copyright 2016-present Facebook. All Rights Reserved.
2 #
2 #
3 # support: fastannotate support for hgweb, and filectx
3 # support: fastannotate support for hgweb, and filectx
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from mercurial.pycompat import getattr
10 from mercurial import (
11 from mercurial import (
11 context as hgcontext,
12 context as hgcontext,
12 dagop,
13 dagop,
13 extensions,
14 extensions,
14 hgweb,
15 hgweb,
15 patch,
16 patch,
16 util,
17 util,
17 )
18 )
18
19
19 from . import (
20 from . import (
20 context,
21 context,
21 revmap,
22 revmap,
22 )
23 )
23
24
24
25
25 class _lazyfctx(object):
26 class _lazyfctx(object):
26 """delegates to fctx but do not construct fctx when unnecessary"""
27 """delegates to fctx but do not construct fctx when unnecessary"""
27
28
28 def __init__(self, repo, node, path):
29 def __init__(self, repo, node, path):
29 self._node = node
30 self._node = node
30 self._path = path
31 self._path = path
31 self._repo = repo
32 self._repo = repo
32
33
33 def node(self):
34 def node(self):
34 return self._node
35 return self._node
35
36
36 def path(self):
37 def path(self):
37 return self._path
38 return self._path
38
39
39 @util.propertycache
40 @util.propertycache
40 def _fctx(self):
41 def _fctx(self):
41 return context.resolvefctx(self._repo, self._node, self._path)
42 return context.resolvefctx(self._repo, self._node, self._path)
42
43
43 def __getattr__(self, name):
44 def __getattr__(self, name):
44 return getattr(self._fctx, name)
45 return getattr(self._fctx, name)
45
46
46
47
47 def _convertoutputs(repo, annotated, contents):
48 def _convertoutputs(repo, annotated, contents):
48 """convert fastannotate outputs to vanilla annotate format"""
49 """convert fastannotate outputs to vanilla annotate format"""
49 # fastannotate returns: [(nodeid, linenum, path)], [linecontent]
50 # fastannotate returns: [(nodeid, linenum, path)], [linecontent]
50 # convert to what fctx.annotate returns: [annotateline]
51 # convert to what fctx.annotate returns: [annotateline]
51 results = []
52 results = []
52 fctxmap = {}
53 fctxmap = {}
53 annotateline = dagop.annotateline
54 annotateline = dagop.annotateline
54 for i, (hsh, linenum, path) in enumerate(annotated):
55 for i, (hsh, linenum, path) in enumerate(annotated):
55 if (hsh, path) not in fctxmap:
56 if (hsh, path) not in fctxmap:
56 fctxmap[(hsh, path)] = _lazyfctx(repo, hsh, path)
57 fctxmap[(hsh, path)] = _lazyfctx(repo, hsh, path)
57 # linenum: the user wants 1-based, we have 0-based.
58 # linenum: the user wants 1-based, we have 0-based.
58 lineno = linenum + 1
59 lineno = linenum + 1
59 fctx = fctxmap[(hsh, path)]
60 fctx = fctxmap[(hsh, path)]
60 line = contents[i]
61 line = contents[i]
61 results.append(annotateline(fctx=fctx, lineno=lineno, text=line))
62 results.append(annotateline(fctx=fctx, lineno=lineno, text=line))
62 return results
63 return results
63
64
64
65
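# Editor's sketch with made-up inputs: the loop above zips fastannotate's
# parallel lists into 1-based dagop.annotateline objects:
#
#     annotated = [(hsh1, 0, b'a.py'), (hsh2, 1, b'a.py')]
#     contents = [b'first\n', b'second\n']
#     # -> [annotateline(fctx=<_lazyfctx hsh1>, lineno=1, text=b'first\n'),
#     #     annotateline(fctx=<_lazyfctx hsh2>, lineno=2, text=b'second\n')]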
65 def _getmaster(fctx):
66 def _getmaster(fctx):
66 """(fctx) -> str"""
67 """(fctx) -> str"""
67 return fctx._repo.ui.config(b'fastannotate', b'mainbranch') or b'default'
68 return fctx._repo.ui.config(b'fastannotate', b'mainbranch') or b'default'
68
69
69
70
70 def _doannotate(fctx, follow=True, diffopts=None):
71 def _doannotate(fctx, follow=True, diffopts=None):
71 """like the vanilla fctx.annotate, but do it via fastannotate, and make
72 """like the vanilla fctx.annotate, but do it via fastannotate, and make
72 the output format compatible with the vanilla fctx.annotate.
73 the output format compatible with the vanilla fctx.annotate.
73 may raise Exception, and always return line numbers.
74 may raise Exception, and always return line numbers.
74 """
75 """
75 master = _getmaster(fctx)
76 master = _getmaster(fctx)
76 annotated = contents = None
77 annotated = contents = None
77
78
78 with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
79 with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
79 try:
80 try:
80 annotated, contents = ac.annotate(
81 annotated, contents = ac.annotate(
81 fctx.rev(), master=master, showpath=True, showlines=True
82 fctx.rev(), master=master, showpath=True, showlines=True
82 )
83 )
83 except Exception:
84 except Exception:
84 ac.rebuild() # try rebuild once
85 ac.rebuild() # try rebuild once
85 fctx._repo.ui.debug(
86 fctx._repo.ui.debug(
86 b'fastannotate: %s: rebuilding broken cache\n' % fctx._path
87 b'fastannotate: %s: rebuilding broken cache\n' % fctx._path
87 )
88 )
88 try:
89 try:
89 annotated, contents = ac.annotate(
90 annotated, contents = ac.annotate(
90 fctx.rev(), master=master, showpath=True, showlines=True
91 fctx.rev(), master=master, showpath=True, showlines=True
91 )
92 )
92 except Exception:
93 except Exception:
93 raise
94 raise
94
95
95 assert annotated and contents
96 assert annotated and contents
96 return _convertoutputs(fctx._repo, annotated, contents)
97 return _convertoutputs(fctx._repo, annotated, contents)
97
98
98
99
99 def _hgwebannotate(orig, fctx, ui):
100 def _hgwebannotate(orig, fctx, ui):
100 diffopts = patch.difffeatureopts(
101 diffopts = patch.difffeatureopts(
101 ui, untrusted=True, section=b'annotate', whitespace=True
102 ui, untrusted=True, section=b'annotate', whitespace=True
102 )
103 )
103 return _doannotate(fctx, diffopts=diffopts)
104 return _doannotate(fctx, diffopts=diffopts)
104
105
105
106
106 def _fctxannotate(
107 def _fctxannotate(
107 orig, self, follow=False, linenumber=False, skiprevs=None, diffopts=None
108 orig, self, follow=False, linenumber=False, skiprevs=None, diffopts=None
108 ):
109 ):
109 if skiprevs:
110 if skiprevs:
110 # skiprevs is not supported yet
111 # skiprevs is not supported yet
111 return orig(
112 return orig(
112 self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts
113 self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts
113 )
114 )
114 try:
115 try:
115 return _doannotate(self, follow, diffopts)
116 return _doannotate(self, follow, diffopts)
116 except Exception as ex:
117 except Exception as ex:
117 self._repo.ui.debug(
118 self._repo.ui.debug(
118 b'fastannotate: falling back to the vanilla ' b'annotate: %r\n' % ex
119 b'fastannotate: falling back to the vanilla ' b'annotate: %r\n' % ex
119 )
120 )
120 return orig(self, follow=follow, skiprevs=skiprevs, diffopts=diffopts)
121 return orig(self, follow=follow, skiprevs=skiprevs, diffopts=diffopts)
121
122
122
123
123 def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
124 def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
124 # skipset: a set-like used to test if a fctx needs to be downloaded
125 # skipset: a set-like used to test if a fctx needs to be downloaded
125 with context.fctxannotatecontext(self, follow, diffopts) as ac:
126 with context.fctxannotatecontext(self, follow, diffopts) as ac:
126 skipset = revmap.revmap(ac.revmappath)
127 skipset = revmap.revmap(ac.revmappath)
127 return orig(
128 return orig(
128 self, follow, skiprevs=skiprevs, diffopts=diffopts, prefetchskip=skipset
129 self, follow, skiprevs=skiprevs, diffopts=diffopts, prefetchskip=skipset
129 )
130 )
130
131
131
132
132 def replacehgwebannotate():
133 def replacehgwebannotate():
133 extensions.wrapfunction(hgweb.webutil, b'annotate', _hgwebannotate)
134 extensions.wrapfunction(hgweb.webutil, b'annotate', _hgwebannotate)
134
135
135
136
136 def replacefctxannotate():
137 def replacefctxannotate():
137 extensions.wrapfunction(hgcontext.basefilectx, b'annotate', _fctxannotate)
138 extensions.wrapfunction(hgcontext.basefilectx, b'annotate', _fctxannotate)
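# Editor's note: hypothetical wiring (not part of this diff) -- an
# extension setup hook would normally install the two wrappers defined
# above:
#
#     def extsetup(ui):
#         replacehgwebannotate()
#         replacefctxannotate()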
@@ -1,2610 +1,2613
1 # histedit.py - interactive history editing for mercurial
1 # histedit.py - interactive history editing for mercurial
2 #
2 #
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """interactive history editing
7 """interactive history editing
8
8
9 With this extension installed, Mercurial gains one new command: histedit. Usage
9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 is as follows, assuming the following history::
10 is as follows, assuming the following history::
11
11
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 | Add delta
13 | Add delta
14 |
14 |
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 | Add gamma
16 | Add gamma
17 |
17 |
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 | Add beta
19 | Add beta
20 |
20 |
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 Add alpha
22 Add alpha
23
23
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 file open in your editor::
25 file open in your editor::
26
26
27 pick c561b4e977df Add beta
27 pick c561b4e977df Add beta
28 pick 030b686bedc4 Add gamma
28 pick 030b686bedc4 Add gamma
29 pick 7c2fd3b9020c Add delta
29 pick 7c2fd3b9020c Add delta
30
30
31 # Edit history between c561b4e977df and 7c2fd3b9020c
31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 #
32 #
33 # Commits are listed from least to most recent
33 # Commits are listed from least to most recent
34 #
34 #
35 # Commands:
35 # Commands:
36 # p, pick = use commit
36 # p, pick = use commit
37 # e, edit = use commit, but stop for amending
37 # e, edit = use commit, but stop for amending
38 # f, fold = use commit, but combine it with the one above
38 # f, fold = use commit, but combine it with the one above
39 # r, roll = like fold, but discard this commit's description and date
39 # r, roll = like fold, but discard this commit's description and date
40 # d, drop = remove commit from history
40 # d, drop = remove commit from history
41 # m, mess = edit commit message without changing commit content
41 # m, mess = edit commit message without changing commit content
42 # b, base = checkout changeset and apply further changesets from there
42 # b, base = checkout changeset and apply further changesets from there
43 #
43 #
44
44
45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
46 for each revision in your history. For example, if you had meant to add gamma
46 for each revision in your history. For example, if you had meant to add gamma
47 before beta, and then wanted to add delta in the same revision as beta, you
47 before beta, and then wanted to add delta in the same revision as beta, you
48 would reorganize the file to look like this::
48 would reorganize the file to look like this::
49
49
50 pick 030b686bedc4 Add gamma
50 pick 030b686bedc4 Add gamma
51 pick c561b4e977df Add beta
51 pick c561b4e977df Add beta
52 fold 7c2fd3b9020c Add delta
52 fold 7c2fd3b9020c Add delta
53
53
54 # Edit history between c561b4e977df and 7c2fd3b9020c
54 # Edit history between c561b4e977df and 7c2fd3b9020c
55 #
55 #
56 # Commits are listed from least to most recent
56 # Commits are listed from least to most recent
57 #
57 #
58 # Commands:
58 # Commands:
59 # p, pick = use commit
59 # p, pick = use commit
60 # e, edit = use commit, but stop for amending
60 # e, edit = use commit, but stop for amending
61 # f, fold = use commit, but combine it with the one above
61 # f, fold = use commit, but combine it with the one above
62 # r, roll = like fold, but discard this commit's description and date
62 # r, roll = like fold, but discard this commit's description and date
63 # d, drop = remove commit from history
63 # d, drop = remove commit from history
64 # m, mess = edit commit message without changing commit content
64 # m, mess = edit commit message without changing commit content
65 # b, base = checkout changeset and apply further changesets from there
65 # b, base = checkout changeset and apply further changesets from there
66 #
66 #
67
67
68 At which point you close the editor and ``histedit`` starts working. When you
68 At which point you close the editor and ``histedit`` starts working. When you
69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
70 those revisions together, offering you a chance to clean up the commit message::
70 those revisions together, offering you a chance to clean up the commit message::
71
71
72 Add beta
72 Add beta
73 ***
73 ***
74 Add delta
74 Add delta
75
75
76 Edit the commit message to your liking, then close the editor. The date used
76 Edit the commit message to your liking, then close the editor. The date used
77 for the commit will be the later of the two commits' dates. For this example,
77 for the commit will be the later of the two commits' dates. For this example,
78 let's assume that the commit message was changed to ``Add beta and delta.``
78 let's assume that the commit message was changed to ``Add beta and delta.``
79 After histedit has run and had a chance to remove any old or temporary
79 After histedit has run and had a chance to remove any old or temporary
80 revisions it needed, the history looks like this::
80 revisions it needed, the history looks like this::
81
81
82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
83 | Add beta and delta.
83 | Add beta and delta.
84 |
84 |
85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
86 | Add gamma
86 | Add gamma
87 |
87 |
88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
89 Add alpha
89 Add alpha
90
90
91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
92 ones) until after it has completed all the editing operations, so it will
92 ones) until after it has completed all the editing operations, so it will
93 probably perform several strip operations when it's done. For the above example,
93 probably perform several strip operations when it's done. For the above example,
94 it had to run strip twice. Strip can be slow depending on a variety of factors,
94 it had to run strip twice. Strip can be slow depending on a variety of factors,
95 so you might need to be a little patient. You can choose to keep the original
95 so you might need to be a little patient. You can choose to keep the original
96 revisions by passing the ``--keep`` flag.
96 revisions by passing the ``--keep`` flag.
97
97
98 The ``edit`` operation will drop you back to a command prompt,
98 The ``edit`` operation will drop you back to a command prompt,
99 allowing you to edit files freely, or even use ``hg record`` to commit
99 allowing you to edit files freely, or even use ``hg record`` to commit
100 some changes as a separate commit. When you're done, any remaining
100 some changes as a separate commit. When you're done, any remaining
101 uncommitted changes will be committed as well. When done, run ``hg
101 uncommitted changes will be committed as well. When done, run ``hg
102 histedit --continue`` to finish this step. If there are uncommitted
102 histedit --continue`` to finish this step. If there are uncommitted
103 changes, you'll be prompted for a new commit message, but the default
103 changes, you'll be prompted for a new commit message, but the default
104 commit message will be the original message for the ``edit`` ed
104 commit message will be the original message for the ``edit`` ed
105 revision, and the date of the original commit will be preserved.
105 revision, and the date of the original commit will be preserved.
106
106
107 The ``message`` operation will give you a chance to revise a commit
107 The ``message`` operation will give you a chance to revise a commit
108 message without changing the contents. It's a shortcut for doing
108 message without changing the contents. It's a shortcut for doing
109 ``edit`` immediately followed by `hg histedit --continue``.
109 ``edit`` immediately followed by `hg histedit --continue``.
110
110
111 If ``histedit`` encounters a conflict when moving a revision (while
111 If ``histedit`` encounters a conflict when moving a revision (while
112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
113 ``edit`` with the difference that it won't prompt you for a commit
113 ``edit`` with the difference that it won't prompt you for a commit
114 message when done. If you decide at this point that you don't like how
114 message when done. If you decide at this point that you don't like how
115 much work it will be to rearrange history, or that you made a mistake,
115 much work it will be to rearrange history, or that you made a mistake,
116 you can use ``hg histedit --abort`` to abandon the new changes you
116 you can use ``hg histedit --abort`` to abandon the new changes you
117 have made and return to the state before you attempted to edit your
117 have made and return to the state before you attempted to edit your
118 history.
118 history.
119
119
120 If we clone the histedit-ed example repository above and add four more
120 If we clone the histedit-ed example repository above and add four more
121 changes, such that we have the following history::
121 changes, such that we have the following history::
122
122
123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
124 | Add theta
124 | Add theta
125 |
125 |
126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
127 | Add eta
127 | Add eta
128 |
128 |
129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
130 | Add zeta
130 | Add zeta
131 |
131 |
132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
133 | Add epsilon
133 | Add epsilon
134 |
134 |
135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
136 | Add beta and delta.
136 | Add beta and delta.
137 |
137 |
138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
139 | Add gamma
139 | Add gamma
140 |
140 |
141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
142 Add alpha
142 Add alpha
143
143
144 If you run ``hg histedit --outgoing`` on the clone then it is the same
144 If you run ``hg histedit --outgoing`` on the clone then it is the same
145 as running ``hg histedit 836302820282``. If you plan to push to a
145 as running ``hg histedit 836302820282``. If you plan to push to a
146 repository that Mercurial does not detect to be related to the source
146 repository that Mercurial does not detect to be related to the source
147 repo, you can add a ``--force`` option.
147 repo, you can add a ``--force`` option.
148
148
149 Config
149 Config
150 ------
150 ------
151
151
152 Histedit rule lines are truncated to 80 characters by default. You
152 Histedit rule lines are truncated to 80 characters by default. You
153 can customize this behavior by setting a different length in your
153 can customize this behavior by setting a different length in your
154 configuration file::
154 configuration file::
155
155
156 [histedit]
156 [histedit]
157 linelen = 120 # truncate rule lines at 120 characters
157 linelen = 120 # truncate rule lines at 120 characters
158
158
159 The summary of a change can be customized as well::
159 The summary of a change can be customized as well::
160
160
161 [histedit]
161 [histedit]
162 summary-template = '{rev} {bookmarks} {desc|firstline}'
162 summary-template = '{rev} {bookmarks} {desc|firstline}'
163
163
164 The customized summary should be kept short enough that rule lines
164 The customized summary should be kept short enough that rule lines
165 will fit in the configured line length. See above if that requires
165 will fit in the configured line length. See above if that requires
166 customization.
166 customization.
167
167
168 ``hg histedit`` attempts to automatically choose an appropriate base
168 ``hg histedit`` attempts to automatically choose an appropriate base
169 revision to use. To change which base revision is used, define a
169 revision to use. To change which base revision is used, define a
170 revset in your configuration file::
170 revset in your configuration file::
171
171
172 [histedit]
172 [histedit]
173 defaultrev = only(.) & draft()
173 defaultrev = only(.) & draft()
174
174
175 By default each edited revision needs to be present in histedit commands.
175 By default each edited revision needs to be present in histedit commands.
176 To remove a revision you need to use the ``drop`` operation. You can configure
176 To remove a revision you need to use the ``drop`` operation. You can configure
177 the drop to be implicit for missing commits by adding::
177 the drop to be implicit for missing commits by adding::
178
178
179 [histedit]
179 [histedit]
180 dropmissing = True
180 dropmissing = True
181
181
182 By default, histedit will close the transaction after each action. For
182 By default, histedit will close the transaction after each action. For
183 performance purposes, you can configure histedit to use a single transaction
183 performance purposes, you can configure histedit to use a single transaction
184 across the entire histedit. WARNING: This setting introduces a significant risk
184 across the entire histedit. WARNING: This setting introduces a significant risk
185 of losing the work you've done in a histedit if the histedit aborts
185 of losing the work you've done in a histedit if the histedit aborts
186 unexpectedly::
186 unexpectedly::
187
187
188 [histedit]
188 [histedit]
189 singletransaction = True
189 singletransaction = True
190
190
191 """
191 """
192
192
193 from __future__ import absolute_import
193 from __future__ import absolute_import
194
194
195 # chistedit dependencies that are not available everywhere
195 # chistedit dependencies that are not available everywhere
196 try:
196 try:
197 import fcntl
197 import fcntl
198 import termios
198 import termios
199 except ImportError:
199 except ImportError:
200 fcntl = None
200 fcntl = None
201 termios = None
201 termios = None
202
202
203 import functools
203 import functools
204 import locale
204 import locale
205 import os
205 import os
206 import struct
206 import struct
207
207
208 from mercurial.i18n import _
208 from mercurial.i18n import _
209 from mercurial.pycompat import open
209 from mercurial.pycompat import (
210 getattr,
211 open,
212 )
210 from mercurial import (
213 from mercurial import (
211 bundle2,
214 bundle2,
212 cmdutil,
215 cmdutil,
213 context,
216 context,
214 copies,
217 copies,
215 destutil,
218 destutil,
216 discovery,
219 discovery,
217 error,
220 error,
218 exchange,
221 exchange,
219 extensions,
222 extensions,
220 hg,
223 hg,
221 logcmdutil,
224 logcmdutil,
222 merge as mergemod,
225 merge as mergemod,
223 mergeutil,
226 mergeutil,
224 node,
227 node,
225 obsolete,
228 obsolete,
226 pycompat,
229 pycompat,
227 registrar,
230 registrar,
228 repair,
231 repair,
229 scmutil,
232 scmutil,
230 state as statemod,
233 state as statemod,
231 util,
234 util,
232 )
235 )
233 from mercurial.utils import (
236 from mercurial.utils import (
234 dateutil,
237 dateutil,
235 stringutil,
238 stringutil,
236 )
239 )
237
240
238 pickle = util.pickle
241 pickle = util.pickle
239 cmdtable = {}
242 cmdtable = {}
240 command = registrar.command(cmdtable)
243 command = registrar.command(cmdtable)
241
244
242 configtable = {}
245 configtable = {}
243 configitem = registrar.configitem(configtable)
246 configitem = registrar.configitem(configtable)
244 configitem(
247 configitem(
245 b'experimental', b'histedit.autoverb', default=False,
248 b'experimental', b'histedit.autoverb', default=False,
246 )
249 )
247 configitem(
250 configitem(
248 b'histedit', b'defaultrev', default=None,
251 b'histedit', b'defaultrev', default=None,
249 )
252 )
250 configitem(
253 configitem(
251 b'histedit', b'dropmissing', default=False,
254 b'histedit', b'dropmissing', default=False,
252 )
255 )
253 configitem(
256 configitem(
254 b'histedit', b'linelen', default=80,
257 b'histedit', b'linelen', default=80,
255 )
258 )
256 configitem(
259 configitem(
257 b'histedit', b'singletransaction', default=False,
260 b'histedit', b'singletransaction', default=False,
258 )
261 )
259 configitem(
262 configitem(
260 b'ui', b'interface.histedit', default=None,
263 b'ui', b'interface.histedit', default=None,
261 )
264 )
262 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
265 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
263
266
264 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
267 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
265 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
268 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
266 # be specifying the version(s) of Mercurial they are tested with, or
269 # be specifying the version(s) of Mercurial they are tested with, or
267 # leave the attribute unspecified.
270 # leave the attribute unspecified.
268 testedwith = b'ships-with-hg-core'
271 testedwith = b'ships-with-hg-core'
269
272
270 actiontable = {}
273 actiontable = {}
271 primaryactions = set()
274 primaryactions = set()
272 secondaryactions = set()
275 secondaryactions = set()
273 tertiaryactions = set()
276 tertiaryactions = set()
274 internalactions = set()
277 internalactions = set()
275
278
276
279
277 def geteditcomment(ui, first, last):
280 def geteditcomment(ui, first, last):
278 """ construct the editor comment
281 """ construct the editor comment
279 The comment includes::
282 The comment includes::
280 - an intro
283 - an intro
281 - sorted primary commands
284 - sorted primary commands
282 - sorted short commands
285 - sorted short commands
283 - sorted long commands
286 - sorted long commands
284 - additional hints
287 - additional hints
285
288
286 Commands are only included once.
289 Commands are only included once.
287 """
290 """
288 intro = _(
291 intro = _(
289 """Edit history between %s and %s
292 """Edit history between %s and %s
290
293
291 Commits are listed from least to most recent
294 Commits are listed from least to most recent
292
295
293 You can reorder changesets by reordering the lines
296 You can reorder changesets by reordering the lines
294
297
295 Commands:
298 Commands:
296 """
299 """
297 )
300 )
298 actions = []
301 actions = []
299
302
300 def addverb(v):
303 def addverb(v):
301 a = actiontable[v]
304 a = actiontable[v]
302 lines = a.message.split(b"\n")
305 lines = a.message.split(b"\n")
303 if len(a.verbs):
306 if len(a.verbs):
304 v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))
307 v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))
305 actions.append(b" %s = %s" % (v, lines[0]))
308 actions.append(b" %s = %s" % (v, lines[0]))
306 actions.extend([b' %s' for l in lines[1:]])
309 actions.extend([b' %s' for l in lines[1:]])
307
310
308 for v in (
311 for v in (
309 sorted(primaryactions)
312 sorted(primaryactions)
310 + sorted(secondaryactions)
313 + sorted(secondaryactions)
311 + sorted(tertiaryactions)
314 + sorted(tertiaryactions)
312 ):
315 ):
313 addverb(v)
316 addverb(v)
314 actions.append(b'')
317 actions.append(b'')
315
318
316 hints = []
319 hints = []
317 if ui.configbool(b'histedit', b'dropmissing'):
320 if ui.configbool(b'histedit', b'dropmissing'):
318 hints.append(
321 hints.append(
319 b"Deleting a changeset from the list "
322 b"Deleting a changeset from the list "
320 b"will DISCARD it from the edited history!"
323 b"will DISCARD it from the edited history!"
321 )
324 )
322
325
323 lines = (intro % (first, last)).split(b'\n') + actions + hints
326 lines = (intro % (first, last)).split(b'\n') + actions + hints
324
327
325 return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
328 return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
326
329
327
330
class histeditstate(object):
    def __init__(self, repo):
        self.repo = repo
        self.actions = None
        self.keep = None
        self.topmost = None
        self.parentctxnode = None
        self.lock = None
        self.wlock = None
        self.backupfile = None
        self.stateobj = statemod.cmdstate(repo, b'histedit-state')
        self.replacements = []

    def read(self):
        """Load histedit state from disk and set fields appropriately."""
        if not self.stateobj.exists():
            cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))

        data = self._read()

        self.parentctxnode = data[b'parentctxnode']
        actions = parserules(data[b'rules'], self)
        self.actions = actions
        self.keep = data[b'keep']
        self.topmost = data[b'topmost']
        self.replacements = data[b'replacements']
        self.backupfile = data[b'backupfile']

    def _read(self):
        fp = self.repo.vfs.read(b'histedit-state')
        if fp.startswith(b'v1\n'):
            data = self._load()
            parentctxnode, rules, keep, topmost, replacements, backupfile = data
        else:
            data = pickle.loads(fp)
            parentctxnode, rules, keep, topmost, replacements = data
            backupfile = None
        rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules])

        return {
            b'parentctxnode': parentctxnode,
            b"rules": rules,
            b"keep": keep,
            b"topmost": topmost,
            b"replacements": replacements,
            b"backupfile": backupfile,
        }

    def write(self, tr=None):
        if tr:
            tr.addfilegenerator(
                b'histedit-state',
                (b'histedit-state',),
                self._write,
                location=b'plain',
            )
        else:
            with self.repo.vfs(b"histedit-state", b"w") as f:
                self._write(f)

    def _write(self, fp):
        fp.write(b'v1\n')
        fp.write(b'%s\n' % node.hex(self.parentctxnode))
        fp.write(b'%s\n' % node.hex(self.topmost))
        fp.write(b'%s\n' % (b'True' if self.keep else b'False'))
        fp.write(b'%d\n' % len(self.actions))
        for action in self.actions:
            fp.write(b'%s\n' % action.tostate())
        fp.write(b'%d\n' % len(self.replacements))
        for replacement in self.replacements:
            fp.write(
                b'%s%s\n'
                % (
                    node.hex(replacement[0]),
                    b''.join(node.hex(r) for r in replacement[1]),
                )
            )
        backupfile = self.backupfile
        if not backupfile:
            backupfile = b''
        fp.write(b'%s\n' % backupfile)
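
    # On disk, a 'v1' histedit-state file (as produced by _write above and
    # parsed by _load below) carries one field per line:
    #
    #   v1
    #   <parentctxnode, 40-char hex>
    #   <topmost, 40-char hex>
    #   True|False                       (keep flag)
    #   <number of actions>
    #   <verb>                           \  two lines per action,
    #   <40-char hex node>               /  via tostate()
    #   <number of replacements>
    #   <old hex><successor hex>...      one line of concatenated hashes each
    #   <backup file name, possibly empty>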

    def _load(self):
        fp = self.repo.vfs(b'histedit-state', b'r')
        lines = [l[:-1] for l in fp.readlines()]

        index = 0
        lines[index]  # version number
        index += 1

        parentctxnode = node.bin(lines[index])
        index += 1

        topmost = node.bin(lines[index])
        index += 1

        keep = lines[index] == b'True'
        index += 1

        # Rules
        rules = []
        rulelen = int(lines[index])
        index += 1
        for i in pycompat.xrange(rulelen):
            ruleaction = lines[index]
            index += 1
            rule = lines[index]
            index += 1
            rules.append((ruleaction, rule))

        # Replacements
        replacements = []
        replacementlen = int(lines[index])
        index += 1
        for i in pycompat.xrange(replacementlen):
            replacement = lines[index]
            original = node.bin(replacement[:40])
            succ = [
                node.bin(replacement[i : i + 40])
                for i in range(40, len(replacement), 40)
            ]
            replacements.append((original, succ))
            index += 1

        backupfile = lines[index]
        index += 1

        fp.close()

        return parentctxnode, rules, keep, topmost, replacements, backupfile

    def clear(self):
        if self.inprogress():
            self.repo.vfs.unlink(b'histedit-state')

    def inprogress(self):
        return self.repo.vfs.exists(b'histedit-state')


class histeditaction(object):
    def __init__(self, state, node):
        self.state = state
        self.repo = state.repo
        self.node = node

    @classmethod
    def fromrule(cls, state, rule):
        """Parses the given rule, returning an instance of the histeditaction.
        """
        ruleid = rule.strip().split(b' ', 1)[0]
        # ruleid can be anything from rev numbers, hashes, "bookmarks" etc
        # Check for validation of rule ids and get the rulehash
        try:
            rev = node.bin(ruleid)
        except TypeError:
            try:
                _ctx = scmutil.revsingle(state.repo, ruleid)
                rulehash = _ctx.hex()
                rev = node.bin(rulehash)
            except error.RepoLookupError:
                raise error.ParseError(_(b"invalid changeset %s") % ruleid)
        return cls(state, rev)

    def verify(self, prev, expected, seen):
        """Verifies semantic correctness of the rule"""
        repo = self.repo
        ha = node.hex(self.node)
        self.node = scmutil.resolvehexnodeidprefix(repo, ha)
        if self.node is None:
            raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])
        self._verifynodeconstraints(prev, expected, seen)

    def _verifynodeconstraints(self, prev, expected, seen):
        # by default, commands need a node in the edited list
        if self.node not in expected:
            raise error.ParseError(
                _(b'%s "%s" changeset was not a candidate')
                % (self.verb, node.short(self.node)),
                hint=_(b'only use listed changesets'),
            )
        # and only one command per node
        if self.node in seen:
            raise error.ParseError(
                _(b'duplicated command for changeset %s')
                % node.short(self.node)
            )

    def torule(self):
        """build a histedit rule line for an action

        by default lines are in the form:
        <hash> <rev> <summary>
        """
        ctx = self.repo[self.node]
        ui = self.repo.ui
        summary = (
            cmdutil.rendertemplate(
                ctx, ui.config(b'histedit', b'summary-template')
            )
            or b''
        )
        summary = summary.splitlines()[0]
        line = b'%s %s %s' % (self.verb, ctx, summary)
        # trim to 75 columns by default so it's not stupidly wide in my editor
        # (the 5 more are left for verb)
        maxlen = self.repo.ui.configint(b'histedit', b'linelen')
        maxlen = max(maxlen, 22)  # avoid truncating hash
        return stringutil.ellipsis(line, maxlen)
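
    # With a summary template such as '{rev} {desc|firstline}', a line
    # produced by torule() looks like, e.g. (hypothetical hash and summary):
    #
    #   pick 617f94f13c0f 3 my commit summary
    #
    # before being truncated to the configured 'histedit.linelen' width.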

    def tostate(self):
        """Print an action in the format used by histedit state files
        (the first line is the verb, the second is the changeset hash)
        """
        return b"%s\n%s" % (self.verb, node.hex(self.node))

    def run(self):
        """Runs the action. The default behavior is simply to apply the
        action's rulectx onto the current parentctx."""
        self.applychange()
        self.continuedirty()
        return self.continueclean()

    def applychange(self):
        """Applies the changes from this action's rulectx onto the current
        parentctx, but does not commit them."""
        repo = self.repo
        rulectx = repo[self.node]
        repo.ui.pushbuffer(error=True, labeled=True)
        hg.update(repo, self.state.parentctxnode, quietempty=True)
        repo.ui.popbuffer()
        stats = applychanges(repo.ui, repo, rulectx, {})
        repo.dirstate.setbranch(rulectx.branch())
        if stats.unresolvedcount:
            raise error.InterventionRequired(
                _(b'Fix up the change (%s %s)')
                % (self.verb, node.short(self.node)),
                hint=_(b'hg histedit --continue to resume'),
            )

    def continuedirty(self):
        """Continues the action when changes have been applied to the working
        copy. The default behavior is to commit the dirty changes."""
        repo = self.repo
        rulectx = repo[self.node]

        editor = self.commiteditor()
        commit = commitfuncfor(repo, rulectx)
        if repo.ui.configbool(b'rewrite', b'update-timestamp'):
            date = dateutil.makedate()
        else:
            date = rulectx.date()
        commit(
            text=rulectx.description(),
            user=rulectx.user(),
            date=date,
            extra=rulectx.extra(),
            editor=editor,
        )

    def commiteditor(self):
        """The editor to be used to edit the commit message."""
        return False

    def continueclean(self):
        """Continues the action when the working copy is clean. The default
        behavior is to accept the current commit as the new version of the
        rulectx."""
        ctx = self.repo[b'.']
        if ctx.node() == self.state.parentctxnode:
            self.repo.ui.warn(
                _(b'%s: skipping changeset (no changes)\n')
                % node.short(self.node)
            )
            return ctx, [(self.node, tuple())]
        if ctx.node() == self.node:
            # Nothing changed
            return ctx, []
        return ctx, [(self.node, (ctx.node(),))]


def commitfuncfor(repo, src):
    """Build a commit function for the replacement of <src>

    This function ensures we apply the same treatment to all changesets.

    - Add a 'histedit_source' entry in extra.

    Note that fold has its own separate logic because its handling is a bit
    different and not easily factored out of the fold method.
    """
    phasemin = src.phase()

    def commitfunc(**kwargs):
        overrides = {(b'phases', b'new-commit'): phasemin}
        with repo.ui.configoverride(overrides, b'histedit'):
            extra = kwargs.get(r'extra', {}).copy()
            extra[b'histedit_source'] = src.hex()
            kwargs[r'extra'] = extra
            return repo.commit(**kwargs)

    return commitfunc
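
# Pinning 'phases.new-commit' to the source changeset's phase ensures the
# rewritten commit is created with at least that phase, so e.g. editing a
# secret changeset never silently produces a draft one.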


def applychanges(ui, repo, ctx, opts):
    """Merge changeset from ctx (only) in the current working directory"""
    wcpar = repo.dirstate.p1()
    if ctx.p1().node() == wcpar:
        # edits are "in place": no merge is needed, just apply the changes
        # onto the parent for editing
        ui.pushbuffer()
        cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
        stats = mergemod.updateresult(0, 0, 0, 0)
        ui.popbuffer()
    else:
        try:
            # ui.forcemerge is an internal variable, do not document
            repo.ui.setconfig(
                b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
            )
            stats = mergemod.graft(repo, ctx, ctx.p1(), [b'local', b'histedit'])
        finally:
            repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
    return stats


def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):
    """collapse the set of revisions from first to last as a new one.

    Expected commit options are:
    - message
    - date
    - username
    Commit message is edited in all cases.

    This function works in memory."""
    ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev()))
    if not ctxs:
        return None
    for c in ctxs:
        if not c.mutable():
            raise error.ParseError(
                _(b"cannot fold into public change %s") % node.short(c.node())
            )
    base = firstctx.p1()

    # commit a new version of the old changeset, including the update
    # collect all files which might be affected
    files = set()
    for ctx in ctxs:
        files.update(ctx.files())

    # Recompute copies (avoid recording a -> b -> a)
    copied = copies.pathcopies(base, lastctx)

    # prune files which were reverted by the updates
    files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]
    # commit version of these files as defined by head
    headmf = lastctx.manifest()

    def filectxfn(repo, ctx, path):
        if path in headmf:
            fctx = lastctx[path]
            flags = fctx.flags()
            mctx = context.memfilectx(
                repo,
                ctx,
                fctx.path(),
                fctx.data(),
                islink=b'l' in flags,
                isexec=b'x' in flags,
                copysource=copied.get(path),
            )
            return mctx
        return None

    if commitopts.get(b'message'):
        message = commitopts[b'message']
    else:
        message = firstctx.description()
    user = commitopts.get(b'user')
    date = commitopts.get(b'date')
    extra = commitopts.get(b'extra')

    parents = (firstctx.p1().node(), firstctx.p2().node())
    editor = None
    if not skipprompt:
        editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold')
    new = context.memctx(
        repo,
        parents=parents,
        text=message,
        files=files,
        filectxfn=filectxfn,
        user=user,
        date=date,
        extra=extra,
        editor=editor,
    )
    return repo.commitctx(new)
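
# collapse() assembles the folded revision entirely in memory (memctx and
# memfilectx); nothing touches the working copy, and only the final
# repo.commitctx() call writes to the repository.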


def _isdirtywc(repo):
    return repo[None].dirty(missing=True)


def abortdirty():
    raise error.Abort(
        _(b'working copy has pending changes'),
        hint=_(
            b'amend, commit, or revert them and run histedit '
            b'--continue, or abort with histedit --abort'
        ),
    )


def action(verbs, message, priority=False, internal=False):
    def wrap(cls):
        assert not priority or not internal
        verb = verbs[0]
        if priority:
            primaryactions.add(verb)
        elif internal:
            internalactions.add(verb)
        elif len(verbs) > 1:
            secondaryactions.add(verb)
        else:
            tertiaryactions.add(verb)

        cls.verb = verb
        cls.verbs = verbs
        cls.message = message
        for verb in verbs:
            actiontable[verb] = cls
        return cls

    return wrap
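
# The @action decorator registers a rule class in actiontable under each of
# its verbs, and records the primary verb in exactly one of the
# primary/internal/secondary/tertiary sets (the assert forbids an action
# being both priority and internal).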


@action([b'pick', b'p'], _(b'use commit'), priority=True)
class pick(histeditaction):
    def run(self):
        rulectx = self.repo[self.node]
        if rulectx.p1().node() == self.state.parentctxnode:
            self.repo.ui.debug(b'node %s unchanged\n' % node.short(self.node))
            return rulectx, []

        return super(pick, self).run()


@action([b'edit', b'e'], _(b'use commit, but stop for amending'), priority=True)
class edit(histeditaction):
    def run(self):
        repo = self.repo
        rulectx = repo[self.node]
        hg.update(repo, self.state.parentctxnode, quietempty=True)
        applychanges(repo.ui, repo, rulectx, {})
        raise error.InterventionRequired(
            _(b'Editing (%s), you may commit or record as needed now.')
            % node.short(self.node),
            hint=_(b'hg histedit --continue to resume'),
        )

    def commiteditor(self):
        return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit')


@action([b'fold', b'f'], _(b'use commit, but combine it with the one above'))
class fold(histeditaction):
    def verify(self, prev, expected, seen):
        """Verifies semantic correctness of the fold rule"""
        super(fold, self).verify(prev, expected, seen)
        repo = self.repo
        if not prev:
            c = repo[self.node].p1()
        elif prev.verb not in (b'pick', b'base'):
            return
        else:
            c = repo[prev.node]
        if not c.mutable():
            raise error.ParseError(
                _(b"cannot fold into public change %s") % node.short(c.node())
            )

    def continuedirty(self):
        repo = self.repo
        rulectx = repo[self.node]

        commit = commitfuncfor(repo, rulectx)
        commit(
            text=b'fold-temp-revision %s' % node.short(self.node),
            user=rulectx.user(),
            date=rulectx.date(),
            extra=rulectx.extra(),
        )

    def continueclean(self):
        repo = self.repo
        ctx = repo[b'.']
        rulectx = repo[self.node]
        parentctxnode = self.state.parentctxnode
        if ctx.node() == parentctxnode:
            repo.ui.warn(_(b'%s: empty changeset\n') % node.short(self.node))
            return ctx, [(self.node, (parentctxnode,))]

        parentctx = repo[parentctxnode]
        newcommits = set(
            c.node()
            for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev())
        )
        if not newcommits:
            repo.ui.warn(
                _(
                    b'%s: cannot fold - working copy is not a '
                    b'descendant of previous commit %s\n'
                )
                % (node.short(self.node), node.short(parentctxnode))
            )
            return ctx, [(self.node, (ctx.node(),))]

        middlecommits = newcommits.copy()
        middlecommits.discard(ctx.node())

        return self.finishfold(
            repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits
        )

    def skipprompt(self):
        """Returns true if the rule should skip the message editor.

        For example, 'fold' wants to show an editor, but 'rollup'
        doesn't want to.
        """
        return False

    def mergedescs(self):
        """Returns true if the rule should merge messages of multiple changes.

        This exists mainly so that 'rollup' rules can be a subclass of
        'fold'.
        """
        return True

    def firstdate(self):
        """Returns true if the rule should preserve the date of the first
        change.

        This exists mainly so that 'rollup' rules can be a subclass of
        'fold'.
        """
        return False

    def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
        parent = ctx.p1().node()
        hg.updaterepo(repo, parent, overwrite=False)
        ### prepare new commit data
        commitopts = {}
        commitopts[b'user'] = ctx.user()
        # commit message
        if not self.mergedescs():
            newmessage = ctx.description()
        else:
            newmessage = (
                b'\n***\n'.join(
                    [ctx.description()]
                    + [repo[r].description() for r in internalchanges]
                    + [oldctx.description()]
                )
                + b'\n'
            )
        commitopts[b'message'] = newmessage
        # date
        if self.firstdate():
            commitopts[b'date'] = ctx.date()
        else:
            commitopts[b'date'] = max(ctx.date(), oldctx.date())
        # if date is to be updated to current
        if ui.configbool(b'rewrite', b'update-timestamp'):
            commitopts[b'date'] = dateutil.makedate()

        extra = ctx.extra().copy()
        # histedit_source
        # note: ctx is likely a temporary commit, but that's the best we can
        # do here. This is sufficient to solve issue3681 anyway.
        extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex())
        commitopts[b'extra'] = extra
        phasemin = max(ctx.phase(), oldctx.phase())
        overrides = {(b'phases', b'new-commit'): phasemin}
        with repo.ui.configoverride(overrides, b'histedit'):
            n = collapse(
                repo,
                ctx,
                repo[newnode],
                commitopts,
                skipprompt=self.skipprompt(),
            )
        if n is None:
            return ctx, []
        hg.updaterepo(repo, n, overwrite=False)
        replacements = [
            (oldctx.node(), (newnode,)),
            (ctx.node(), (n,)),
            (newnode, (n,)),
        ]
        for ich in internalchanges:
            replacements.append((ich, (n,)))
        return repo[n], replacements
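
    # The replacement list wires every participant of the fold to the final
    # collapsed commit: oldctx -> newnode -> n, ctx -> n, and each
    # intermediate commit -> n, so successor bookkeeping stays exact.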


@action(
    [b'base', b'b'],
    _(b'checkout changeset and apply further changesets from there'),
)
class base(histeditaction):
    def run(self):
        if self.repo[b'.'].node() != self.node:
            mergemod.update(self.repo, self.node, branchmerge=False, force=True)
        return self.continueclean()

    def continuedirty(self):
        abortdirty()

    def continueclean(self):
        basectx = self.repo[b'.']
        return basectx, []

    def _verifynodeconstraints(self, prev, expected, seen):
        # base can only be used with a node not in the edited set
        if self.node in expected:
            msg = _(b'%s "%s" changeset was an edited list candidate')
            raise error.ParseError(
                msg % (self.verb, node.short(self.node)),
                hint=_(b'base must only use unlisted changesets'),
            )


@action(
    [b'_multifold'],
    _(
        """fold subclass used for when multiple folds happen in a row

    We only want to fire the editor for the folded message once when
    (say) four changes are folded down into a single change. This is
    similar to rollup, but we should preserve both messages so that
    when the last fold operation runs we can show the user all the
    commit messages in their editor.
    """
    ),
    internal=True,
)
class _multifold(fold):
    def skipprompt(self):
        return True


@action(
    [b"roll", b"r"],
    _(b"like fold, but discard this commit's description and date"),
)
class rollup(fold):
    def mergedescs(self):
        return False

    def skipprompt(self):
        return True

    def firstdate(self):
        return True


@action([b"drop", b"d"], _(b'remove commit from history'))
class drop(histeditaction):
    def run(self):
        parentctx = self.repo[self.state.parentctxnode]
        return parentctx, [(self.node, tuple())]


@action(
    [b"mess", b"m"],
    _(b'edit commit message without changing commit content'),
    priority=True,
)
class message(histeditaction):
    def commiteditor(self):
        return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess')


def findoutgoing(ui, repo, remote=None, force=False, opts=None):
    """utility function to find the first outgoing changeset

    Used by initialization code"""
    if opts is None:
        opts = {}
    dest = ui.expandpath(remote or b'default-push', remote or b'default')
    dest, branches = hg.parseurl(dest, None)[:2]
    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))

    revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
    if not outgoing.missing:
        raise error.Abort(_(b'no outgoing ancestors'))
    roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
    if len(roots) > 1:
        msg = _(b'there are ambiguous outgoing revisions')
        hint = _(b"see 'hg help histedit' for more detail")
        raise error.Abort(msg, hint=hint)
    return repo[roots[0]].node()


# Curses Support
try:
    import curses
except ImportError:
    curses = None

KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll']
ACTION_LABELS = {
    b'fold': b'^fold',
    b'roll': b'^roll',
}

COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5
COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8

E_QUIT, E_HISTEDIT = 1, 2
E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7
MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3

KEYTABLE = {
    b'global': {
        b'h': b'next-action',
        b'KEY_RIGHT': b'next-action',
        b'l': b'prev-action',
        b'KEY_LEFT': b'prev-action',
        b'q': b'quit',
        b'c': b'histedit',
        b'C': b'histedit',
        b'v': b'showpatch',
        b'?': b'help',
    },
    MODE_RULES: {
        b'd': b'action-drop',
        b'e': b'action-edit',
        b'f': b'action-fold',
        b'm': b'action-mess',
        b'p': b'action-pick',
        b'r': b'action-roll',
        b' ': b'select',
        b'j': b'down',
        b'k': b'up',
        b'KEY_DOWN': b'down',
        b'KEY_UP': b'up',
        b'J': b'move-down',
        b'K': b'move-up',
        b'KEY_NPAGE': b'move-down',
        b'KEY_PPAGE': b'move-up',
        b'0': b'goto',  # Used for 0..9
    },
    MODE_PATCH: {
        b' ': b'page-down',
        b'KEY_NPAGE': b'page-down',
        b'KEY_PPAGE': b'page-up',
        b'j': b'line-down',
        b'k': b'line-up',
        b'KEY_DOWN': b'line-down',
        b'KEY_UP': b'line-up',
        b'J': b'down',
        b'K': b'up',
    },
    MODE_HELP: {},
}


def screen_size():
    return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b'    '))
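
# screen_size() asks the tty for its window size: the TIOCGWINSZ ioctl fills
# the 4-byte buffer with two shorts, unpacked as (rows, columns).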


class histeditrule(object):
    def __init__(self, ctx, pos, action=b'pick'):
        self.ctx = ctx
        self.action = action
        self.origpos = pos
        self.pos = pos
        self.conflicts = []

    def __str__(self):
        # Some actions ('fold' and 'roll') combine a patch with a previous one.
        # Add a marker showing which patch they apply to, and also omit the
        # description for 'roll' (since it will get discarded). Example display:
        #
        #   #10 pick  316392:06a16c25c053  add option to skip tests
        #   #11 ^roll 316393:71313c964cc5
        #   #12 pick  316394:ab31f3973b0d  include mfbt for mozilla-config.h
        #   #13 ^fold 316395:14ce5803f4c3  fix warnings
        #
        # The carets point to the changeset being folded into ("roll this
        # changeset into the changeset above").
        action = ACTION_LABELS.get(self.action, self.action)
        h = self.ctx.hex()[0:12]
        r = self.ctx.rev()
        desc = self.ctx.description().splitlines()[0].strip()
        if self.action == b'roll':
            desc = b''
        return b"#{0:<2} {1:<6} {2}:{3} {4}".format(
            self.origpos, action, r, h, desc
        )

    def checkconflicts(self, other):
        if other.pos > self.pos and other.origpos <= self.origpos:
            if set(other.ctx.files()) & set(self.ctx.files()) != set():
                self.conflicts.append(other)
            return self.conflicts

        if other in self.conflicts:
            self.conflicts.remove(other)
        return self.conflicts
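
    # checkconflicts() flags 'other' as a conflict when the two rules have
    # been reordered relative to their original order and their changesets
    # touch at least one common file; restoring the original order removes
    # the flag again.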


# ============ EVENTS ===============
def movecursor(state, oldpos, newpos):
    '''Change the rule/changeset that the cursor is pointing to, regardless of
    current mode (you can switch between patches from the view patch window).'''
    state[b'pos'] = newpos

    mode, _ = state[b'mode']
    if mode == MODE_RULES:
        # Scroll through the list by updating the view for MODE_RULES, so that
        # even if we are not currently viewing the rules, switching back will
        # result in the cursor's rule being visible.
        modestate = state[b'modes'][MODE_RULES]
        if newpos < modestate[b'line_offset']:
            modestate[b'line_offset'] = newpos
        elif newpos > modestate[b'line_offset'] + state[b'page_height'] - 1:
            modestate[b'line_offset'] = newpos - state[b'page_height'] + 1

    # Reset the patch view region to the top of the new patch.
    state[b'modes'][MODE_PATCH][b'line_offset'] = 0


def changemode(state, mode):
    curmode, _ = state[b'mode']
    state[b'mode'] = (mode, curmode)
    if mode == MODE_PATCH:
        state[b'modes'][MODE_PATCH][b'patchcontents'] = patchcontents(state)


def makeselection(state, pos):
    state[b'selected'] = pos


def swap(state, oldpos, newpos):
    """Swap two positions and calculate necessary conflicts in
    O(|newpos-oldpos|) time"""

    rules = state[b'rules']
    assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules)

    rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos]

    # TODO: swap should not know about histeditrule's internals
    rules[newpos].pos = newpos
    rules[oldpos].pos = oldpos

    start = min(oldpos, newpos)
    end = max(oldpos, newpos)
    for r in pycompat.xrange(start, end + 1):
        rules[newpos].checkconflicts(rules[r])
        rules[oldpos].checkconflicts(rules[r])

    if state[b'selected']:
        makeselection(state, newpos)
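
# Only the rules between the two swapped positions can change conflict
# status, which is why re-checking the closed range [start, end] is enough
# and keeps swap() at the advertised O(|newpos - oldpos|) cost.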
1203
1206
1204
1207
1205 def changeaction(state, pos, action):
1208 def changeaction(state, pos, action):
1206 """Change the action state on the given position to the new action"""
1209 """Change the action state on the given position to the new action"""
1207 rules = state[b'rules']
1210 rules = state[b'rules']
1208 assert 0 <= pos < len(rules)
1211 assert 0 <= pos < len(rules)
1209 rules[pos].action = action
1212 rules[pos].action = action
1210
1213
1211
1214
1212 def cycleaction(state, pos, next=False):
1215 def cycleaction(state, pos, next=False):
1213 """Changes the action state the next or the previous action from
1216 """Changes the action state the next or the previous action from
1214 the action list"""
1217 the action list"""
1215 rules = state[b'rules']
1218 rules = state[b'rules']
1216 assert 0 <= pos < len(rules)
1219 assert 0 <= pos < len(rules)
1217 current = rules[pos].action
1220 current = rules[pos].action
1218
1221
1219 assert current in KEY_LIST
1222 assert current in KEY_LIST
1220
1223
1221 index = KEY_LIST.index(current)
1224 index = KEY_LIST.index(current)
1222 if next:
1225 if next:
1223 index += 1
1226 index += 1
1224 else:
1227 else:
1225 index -= 1
1228 index -= 1
1226 changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)])
1229 changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)])
1227
1230
1228
1231
1229 def changeview(state, delta, unit):
1232 def changeview(state, delta, unit):
1230 '''Change the region of whatever is being viewed (a patch or the list of
1233 '''Change the region of whatever is being viewed (a patch or the list of
1231 changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.'''
1234 changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.'''
1232 mode, _ = state[b'mode']
1235 mode, _ = state[b'mode']
1233 if mode != MODE_PATCH:
1236 if mode != MODE_PATCH:
1234 return
1237 return
1235 mode_state = state[b'modes'][mode]
1238 mode_state = state[b'modes'][mode]
1236 num_lines = len(mode_state[b'patchcontents'])
1239 num_lines = len(mode_state[b'patchcontents'])
1237 page_height = state[b'page_height']
1240 page_height = state[b'page_height']
1238 unit = page_height if unit == b'page' else 1
1241 unit = page_height if unit == b'page' else 1
1239 num_pages = 1 + (num_lines - 1) / page_height
1242 num_pages = 1 + (num_lines - 1) / page_height
1240 max_offset = (num_pages - 1) * page_height
1243 max_offset = (num_pages - 1) * page_height
1241 newline = mode_state[b'line_offset'] + delta * unit
1244 newline = mode_state[b'line_offset'] + delta * unit
1242 mode_state[b'line_offset'] = max(0, min(max_offset, newline))
1245 mode_state[b'line_offset'] = max(0, min(max_offset, newline))
1243
1246
1244
1247
1245 def event(state, ch):
1248 def event(state, ch):
1246 """Change state based on the current character input
1249 """Change state based on the current character input
1247
1250
1248 This takes the current state and based on the current character input from
1251 This takes the current state and based on the current character input from
1249 the user we change the state.
1252 the user we change the state.
1250 """
1253 """
1251 selected = state[b'selected']
1254 selected = state[b'selected']
1252 oldpos = state[b'pos']
1255 oldpos = state[b'pos']
1253 rules = state[b'rules']
1256 rules = state[b'rules']
1254
1257
1255 if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
1258 if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
1256 return E_RESIZE
1259 return E_RESIZE
1257
1260
1258 lookup_ch = ch
1261 lookup_ch = ch
1259 if b'0' <= ch <= b'9':
1262 if b'0' <= ch <= b'9':
1260 lookup_ch = b'0'
1263 lookup_ch = b'0'
1261
1264
1262 curmode, prevmode = state[b'mode']
1265 curmode, prevmode = state[b'mode']
1263 action = KEYTABLE[curmode].get(
1266 action = KEYTABLE[curmode].get(
1264 lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
1267 lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
1265 )
1268 )
1266 if action is None:
1269 if action is None:
1267 return
1270 return
1268 if action in (b'down', b'move-down'):
1271 if action in (b'down', b'move-down'):
1269 newpos = min(oldpos + 1, len(rules) - 1)
1272 newpos = min(oldpos + 1, len(rules) - 1)
1270 movecursor(state, oldpos, newpos)
1273 movecursor(state, oldpos, newpos)
1271 if selected is not None or action == b'move-down':
1274 if selected is not None or action == b'move-down':
1272 swap(state, oldpos, newpos)
1275 swap(state, oldpos, newpos)
1273 elif action in (b'up', b'move-up'):
1276 elif action in (b'up', b'move-up'):
1274 newpos = max(0, oldpos - 1)
1277 newpos = max(0, oldpos - 1)
1275 movecursor(state, oldpos, newpos)
1278 movecursor(state, oldpos, newpos)
1276 if selected is not None or action == b'move-up':
1279 if selected is not None or action == b'move-up':
1277 swap(state, oldpos, newpos)
1280 swap(state, oldpos, newpos)
1278 elif action == b'next-action':
1281 elif action == b'next-action':
1279 cycleaction(state, oldpos, next=True)
1282 cycleaction(state, oldpos, next=True)
1280 elif action == b'prev-action':
1283 elif action == b'prev-action':
1281 cycleaction(state, oldpos, next=False)
1284 cycleaction(state, oldpos, next=False)
1282 elif action == b'select':
1285 elif action == b'select':
1283 selected = oldpos if selected is None else None
1286 selected = oldpos if selected is None else None
1284 makeselection(state, selected)
1287 makeselection(state, selected)
1285 elif action == b'goto' and int(ch) < len(rules) and len(rules) <= 10:
1288 elif action == b'goto' and int(ch) < len(rules) and len(rules) <= 10:
1286 newrule = next((r for r in rules if r.origpos == int(ch)))
1289 newrule = next((r for r in rules if r.origpos == int(ch)))
1287 movecursor(state, oldpos, newrule.pos)
1290 movecursor(state, oldpos, newrule.pos)
1288 if selected is not None:
1291 if selected is not None:
1289 swap(state, oldpos, newrule.pos)
1292 swap(state, oldpos, newrule.pos)
1290 elif action.startswith(b'action-'):
1293 elif action.startswith(b'action-'):
1291 changeaction(state, oldpos, action[7:])
1294 changeaction(state, oldpos, action[7:])
1292 elif action == b'showpatch':
1295 elif action == b'showpatch':
1293 changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode)
1296 changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode)
1294 elif action == b'help':
1297 elif action == b'help':
1295 changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode)
1298 changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode)
1296 elif action == b'quit':
1299 elif action == b'quit':
1297 return E_QUIT
1300 return E_QUIT
1298 elif action == b'histedit':
1301 elif action == b'histedit':
1299 return E_HISTEDIT
1302 return E_HISTEDIT
1300 elif action == b'page-down':
1303 elif action == b'page-down':
1301 return E_PAGEDOWN
1304 return E_PAGEDOWN
1302 elif action == b'page-up':
1305 elif action == b'page-up':
1303 return E_PAGEUP
1306 return E_PAGEUP
1304 elif action == b'line-down':
1307 elif action == b'line-down':
1305 return E_LINEDOWN
1308 return E_LINEDOWN
1306 elif action == b'line-up':
1309 elif action == b'line-up':
1307 return E_LINEUP
1310 return E_LINEUP
1308
1311
1309
1312
1310 def makecommands(rules):
1313 def makecommands(rules):
1311 """Returns a list of commands consumable by histedit --commands based on
1314 """Returns a list of commands consumable by histedit --commands based on
1312 our list of rules"""
1315 our list of rules"""
1313 commands = []
1316 commands = []
1314 for rules in rules:
1317 for rules in rules:
1315 commands.append(b"{0} {1}\n".format(rules.action, rules.ctx))
1318 commands.append(b"{0} {1}\n".format(rules.action, rules.ctx))
1316 return commands
1319 return commands
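
# For example, a two-entry rules list could serialize to lines like
# (hypothetical hashes):
#
#   pick 252a1af424ad
#   roll 82623d38b9ba
#
# matching the format that 'hg histedit --commands' consumes.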


def addln(win, y, x, line, color=None):
    """Add a line to the given window, padded on the right with whitespace to
    the full window width so that any background color covers the whole line"""
    maxy, maxx = win.getmaxyx()
    length = maxx - 1 - x
    line = (b"{0:<%d}" % length).format(str(line).strip())[:length]
    if y < 0:
        y = maxy + y
    if x < 0:
        x = maxx + x
    if color:
        win.addstr(y, x, line, color)
    else:
        win.addstr(y, x, line)
1333
1336
1334
1337
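# For example, addln(win, -1, 0, b'conflict', curses.color_pair(COLOR_WARN))
# writes on the window's last row (negative offsets count back from
# maxy/maxx) and pads the text with spaces to the full row width, so the
# warning color covers the entire line.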
1335 def _trunc_head(line, n):
1338 def _trunc_head(line, n):
1336 if len(line) <= n:
1339 if len(line) <= n:
1337 return line
1340 return line
1338 return b'> ' + line[-(n - 2) :]
1341 return b'> ' + line[-(n - 2) :]
1339
1342
1340
1343
1341 def _trunc_tail(line, n):
1344 def _trunc_tail(line, n):
1342 if len(line) <= n:
1345 if len(line) <= n:
1343 return line
1346 return line
1344 return line[: n - 2] + b' >'
1347 return line[: n - 2] + b' >'
1345
1348
1346
1349
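# e.g. _trunc_head(b'abcdefgh', 6) == b'> efgh'
#      _trunc_tail(b'abcdefgh', 6) == b'abcd >'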
1347 def patchcontents(state):
1350 def patchcontents(state):
1348 repo = state[b'repo']
1351 repo = state[b'repo']
1349 rule = state[b'rules'][state[b'pos']]
1352 rule = state[b'rules'][state[b'pos']]
1350 displayer = logcmdutil.changesetdisplayer(
1353 displayer = logcmdutil.changesetdisplayer(
1351 repo.ui, repo, {b"patch": True, b"template": b"status"}, buffered=True
1354 repo.ui, repo, {b"patch": True, b"template": b"status"}, buffered=True
1352 )
1355 )
1353 overrides = {(b'ui', b'verbose'): True}
1356 overrides = {(b'ui', b'verbose'): True}
1354 with repo.ui.configoverride(overrides, source=b'histedit'):
1357 with repo.ui.configoverride(overrides, source=b'histedit'):
1355 displayer.show(rule.ctx)
1358 displayer.show(rule.ctx)
1356 displayer.close()
1359 displayer.close()
1357 return displayer.hunk[rule.ctx.rev()].splitlines()
1360 return displayer.hunk[rule.ctx.rev()].splitlines()
1358
1361
1359
1362
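# patchcontents() feeds MODE_PATCH below: with verbosity forced on, the
# 'status' template yields the changeset header plus its full diff, which
# renderpatch() then colorizes line by line.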
1360 def _chisteditmain(repo, rules, stdscr):
1363 def _chisteditmain(repo, rules, stdscr):
1361 try:
1364 try:
1362 curses.use_default_colors()
1365 curses.use_default_colors()
1363 except curses.error:
1366 except curses.error:
1364 pass
1367 pass
1365
1368
1366 # initialize color pattern
1369 # initialize color pattern
1367 curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
1370 curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
1368 curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
1371 curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
1369 curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
1372 curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
1370 curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
1373 curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
1371 curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
1374 curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
1372 curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
1375 curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
1373 curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
1376 curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
1374 curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
1377 curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
1375
1378
1376 # don't display the cursor
1379 # don't display the cursor
1377 try:
1380 try:
1378 curses.curs_set(0)
1381 curses.curs_set(0)
1379 except curses.error:
1382 except curses.error:
1380 pass
1383 pass
1381
1384
1382 def rendercommit(win, state):
1385 def rendercommit(win, state):
1383 """Renders the commit window that shows the log of the current selected
1386 """Renders the commit window that shows the log of the current selected
1384 commit"""
1387 commit"""
1385 pos = state[b'pos']
1388 pos = state[b'pos']
1386 rules = state[b'rules']
1389 rules = state[b'rules']
1387 rule = rules[pos]
1390 rule = rules[pos]
1388
1391
1389 ctx = rule.ctx
1392 ctx = rule.ctx
1390 win.box()
1393 win.box()
1391
1394
1392 maxy, maxx = win.getmaxyx()
1395 maxy, maxx = win.getmaxyx()
1393 length = maxx - 3
1396 length = maxx - 3
1394
1397
1395 line = b"changeset: {0}:{1:<12}".format(ctx.rev(), ctx)
1398 line = b"changeset: {0}:{1:<12}".format(ctx.rev(), ctx)
1396 win.addstr(1, 1, line[:length])
1399 win.addstr(1, 1, line[:length])
1397
1400
1398 line = b"user: {0}".format(ctx.user())
1401 line = b"user: {0}".format(ctx.user())
1399 win.addstr(2, 1, line[:length])
1402 win.addstr(2, 1, line[:length])
1400
1403
1401 bms = repo.nodebookmarks(ctx.node())
1404 bms = repo.nodebookmarks(ctx.node())
1402 line = b"bookmark: {0}".format(b' '.join(bms))
1405 line = b"bookmark: {0}".format(b' '.join(bms))
1403 win.addstr(3, 1, line[:length])
1406 win.addstr(3, 1, line[:length])
1404
1407
1405 line = b"summary: {0}".format(ctx.description().splitlines()[0])
1408 line = b"summary: {0}".format(ctx.description().splitlines()[0])
1406 win.addstr(4, 1, line[:length])
1409 win.addstr(4, 1, line[:length])
1407
1410
1408 line = b"files: "
1411 line = b"files: "
1409 win.addstr(5, 1, line)
1412 win.addstr(5, 1, line)
1410 fnx = 1 + len(line)
1413 fnx = 1 + len(line)
1411 fnmaxx = length - fnx + 1
1414 fnmaxx = length - fnx + 1
1412 y = 5
1415 y = 5
1413 fnmaxn = maxy - (1 + y) - 1
1416 fnmaxn = maxy - (1 + y) - 1
1414 files = ctx.files()
1417 files = ctx.files()
1415 for i, line1 in enumerate(files):
1418 for i, line1 in enumerate(files):
1416 if len(files) > fnmaxn and i == fnmaxn - 1:
1419 if len(files) > fnmaxn and i == fnmaxn - 1:
1417 win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))
1420 win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))
1418 y = y + 1
1421 y = y + 1
1419 break
1422 break
1420 win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
1423 win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
1421 y = y + 1
1424 y = y + 1
1422
1425
1423 conflicts = rule.conflicts
1426 conflicts = rule.conflicts
1424 if len(conflicts) > 0:
1427 if len(conflicts) > 0:
1425 conflictstr = b','.join(map(lambda r: str(r.ctx), conflicts))
1428 conflictstr = b','.join(map(lambda r: str(r.ctx), conflicts))
1426 conflictstr = b"changed files overlap with {0}".format(conflictstr)
1429 conflictstr = b"changed files overlap with {0}".format(conflictstr)
1427 else:
1430 else:
1428 conflictstr = b'no overlap'
1431 conflictstr = b'no overlap'
1429
1432
1430 win.addstr(y, 1, conflictstr[:length])
1433 win.addstr(y, 1, conflictstr[:length])
1431 win.noutrefresh()
1434 win.noutrefresh()
1432
1435
1433 def helplines(mode):
1436 def helplines(mode):
1434 if mode == MODE_PATCH:
1437 if mode == MODE_PATCH:
1435 help = b"""\
1438 help = b"""\
1436 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
1439 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
1437 pgup: prev page, space/pgdn: next page, c: commit, q: abort
1440 pgup: prev page, space/pgdn: next page, c: commit, q: abort
1438 """
1441 """
1439 else:
1442 else:
1440 help = b"""\
1443 help = b"""\
1441 ?: help, k/up: move up, j/down: move down, space: select, v: view patch
1444 ?: help, k/up: move up, j/down: move down, space: select, v: view patch
1442 d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
1445 d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
1443 pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
1446 pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
1444 """
1447 """
1445 return help.splitlines()
1448 return help.splitlines()
1446
1449
1447 def renderhelp(win, state):
1450 def renderhelp(win, state):
1448 maxy, maxx = win.getmaxyx()
1451 maxy, maxx = win.getmaxyx()
1449 mode, _ = state[b'mode']
1452 mode, _ = state[b'mode']
1450 for y, line in enumerate(helplines(mode)):
1453 for y, line in enumerate(helplines(mode)):
1451 if y >= maxy:
1454 if y >= maxy:
1452 break
1455 break
1453 addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
1456 addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
1454 win.noutrefresh()
1457 win.noutrefresh()
1455
1458
1456 def renderrules(rulesscr, state):
1459 def renderrules(rulesscr, state):
1457 rules = state[b'rules']
1460 rules = state[b'rules']
1458 pos = state[b'pos']
1461 pos = state[b'pos']
1459 selected = state[b'selected']
1462 selected = state[b'selected']
1460 start = state[b'modes'][MODE_RULES][b'line_offset']
1463 start = state[b'modes'][MODE_RULES][b'line_offset']
1461
1464
1462 conflicts = [r.ctx for r in rules if r.conflicts]
1465 conflicts = [r.ctx for r in rules if r.conflicts]
1463 if len(conflicts) > 0:
1466 if len(conflicts) > 0:
1464 line = b"potential conflict in %s" % b','.join(map(str, conflicts))
1467 line = b"potential conflict in %s" % b','.join(map(str, conflicts))
1465 addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))
1468 addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))
1466
1469
1467 for y, rule in enumerate(rules[start:]):
1470 for y, rule in enumerate(rules[start:]):
1468 if y >= state[b'page_height']:
1471 if y >= state[b'page_height']:
1469 break
1472 break
1470 if len(rule.conflicts) > 0:
1473 if len(rule.conflicts) > 0:
1471 rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
1474 rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
1472 else:
1475 else:
1473 rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)
1476 rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)
1474 if y + start == selected:
1477 if y + start == selected:
1475 addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
1478 addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
1476 elif y + start == pos:
1479 elif y + start == pos:
1477 addln(
1480 addln(
1478 rulesscr,
1481 rulesscr,
1479 y,
1482 y,
1480 2,
1483 2,
1481 rule,
1484 rule,
1482 curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,
1485 curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,
1483 )
1486 )
1484 else:
1487 else:
1485 addln(rulesscr, y, 2, rule)
1488 addln(rulesscr, y, 2, rule)
1486 rulesscr.noutrefresh()
1489 rulesscr.noutrefresh()
1487
1490
1488 def renderstring(win, state, output, diffcolors=False):
1491 def renderstring(win, state, output, diffcolors=False):
1489 maxy, maxx = win.getmaxyx()
1492 maxy, maxx = win.getmaxyx()
1490 length = min(maxy - 1, len(output))
1493 length = min(maxy - 1, len(output))
1491 for y in range(0, length):
1494 for y in range(0, length):
1492 line = output[y]
1495 line = output[y]
1493 if diffcolors:
1496 if diffcolors:
1494 if line and line[0] == b'+':
1497 if line and line[0] == b'+':
1495 win.addstr(
1498 win.addstr(
1496 y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE)
1499 y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE)
1497 )
1500 )
1498 elif line and line[0] == b'-':
1501 elif line and line[0] == b'-':
1499 win.addstr(
1502 win.addstr(
1500 y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE)
1503 y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE)
1501 )
1504 )
1502 elif line.startswith(b'@@ '):
1505 elif line.startswith(b'@@ '):
1503 win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))
1506 win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))
1504 else:
1507 else:
1505 win.addstr(y, 0, line)
1508 win.addstr(y, 0, line)
1506 else:
1509 else:
1507 win.addstr(y, 0, line)
1510 win.addstr(y, 0, line)
1508 win.noutrefresh()
1511 win.noutrefresh()
1509
1512
1510 def renderpatch(win, state):
1513 def renderpatch(win, state):
1511 start = state[b'modes'][MODE_PATCH][b'line_offset']
1514 start = state[b'modes'][MODE_PATCH][b'line_offset']
1512 content = state[b'modes'][MODE_PATCH][b'patchcontents']
1515 content = state[b'modes'][MODE_PATCH][b'patchcontents']
1513 renderstring(win, state, content[start:], diffcolors=True)
1516 renderstring(win, state, content[start:], diffcolors=True)
1514
1517
1515 def layout(mode):
1518 def layout(mode):
1516 maxy, maxx = stdscr.getmaxyx()
1519 maxy, maxx = stdscr.getmaxyx()
1517 helplen = len(helplines(mode))
1520 helplen = len(helplines(mode))
1518 return {
1521 return {
1519 b'commit': (12, maxx),
1522 b'commit': (12, maxx),
1520 b'help': (helplen, maxx),
1523 b'help': (helplen, maxx),
1521 b'main': (maxy - helplen - 12, maxx),
1524 b'main': (maxy - helplen - 12, maxx),
1522 }
1525 }
1523
1526
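# An illustration of layout() (sizes assumed): on an 80x24 terminal with a
# three-line help text it yields
#   {b'commit': (12, 80), b'help': (3, 80), b'main': (9, 80)}
# i.e. (height, width) tuples for panes stacked top to bottom across the
# full screen width.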
1524 def drawvertwin(size, y, x):
1527 def drawvertwin(size, y, x):
1525 win = curses.newwin(size[0], size[1], y, x)
1528 win = curses.newwin(size[0], size[1], y, x)
1526 y += size[0]
1529 y += size[0]
1527 return win, y, x
1530 return win, y, x
1528
1531
1529 state = {
1532 state = {
1530 b'pos': 0,
1533 b'pos': 0,
1531 b'rules': rules,
1534 b'rules': rules,
1532 b'selected': None,
1535 b'selected': None,
1533 b'mode': (MODE_INIT, MODE_INIT),
1536 b'mode': (MODE_INIT, MODE_INIT),
1534 b'page_height': None,
1537 b'page_height': None,
1535 b'modes': {
1538 b'modes': {
1536 MODE_RULES: {b'line_offset': 0,},
1539 MODE_RULES: {b'line_offset': 0,},
1537 MODE_PATCH: {b'line_offset': 0,},
1540 MODE_PATCH: {b'line_offset': 0,},
1538 },
1541 },
1539 b'repo': repo,
1542 b'repo': repo,
1540 }
1543 }
1541
1544
1542 # event loop
1545 # event loop
1543 ch = None
1546 ch = None
1544 stdscr.clear()
1547 stdscr.clear()
1545 stdscr.refresh()
1548 stdscr.refresh()
1546 while True:
1549 while True:
1547 try:
1550 try:
1548 oldmode, _ = state[b'mode']
1551 oldmode, _ = state[b'mode']
1549 if oldmode == MODE_INIT:
1552 if oldmode == MODE_INIT:
1550 changemode(state, MODE_RULES)
1553 changemode(state, MODE_RULES)
1551 e = event(state, ch)
1554 e = event(state, ch)
1552
1555
1553 if e == E_QUIT:
1556 if e == E_QUIT:
1554 return False
1557 return False
1555 if e == E_HISTEDIT:
1558 if e == E_HISTEDIT:
1556 return state[b'rules']
1559 return state[b'rules']
1557 else:
1560 else:
1558 if e == E_RESIZE:
1561 if e == E_RESIZE:
1559 size = screen_size()
1562 size = screen_size()
1560 if size != stdscr.getmaxyx():
1563 if size != stdscr.getmaxyx():
1561 curses.resizeterm(*size)
1564 curses.resizeterm(*size)
1562
1565
1563 curmode, _ = state[b'mode']
1566 curmode, _ = state[b'mode']
1564 sizes = layout(curmode)
1567 sizes = layout(curmode)
1565 if curmode != oldmode:
1568 if curmode != oldmode:
1566 state[b'page_height'] = sizes[b'main'][0]
1569 state[b'page_height'] = sizes[b'main'][0]
1567 # Adjust the view to fit the current screen size.
1570 # Adjust the view to fit the current screen size.
1568 movecursor(state, state[b'pos'], state[b'pos'])
1571 movecursor(state, state[b'pos'], state[b'pos'])
1569
1572
1570 # Pack the windows against the top, each pane spread across the
1573 # Pack the windows against the top, each pane spread across the
1571 # full width of the screen.
1574 # full width of the screen.
1572 y, x = (0, 0)
1575 y, x = (0, 0)
1573 helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
1576 helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
1574 mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
1577 mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
1575 commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
1578 commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
1576
1579
1577 if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
1580 if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
1578 if e == E_PAGEDOWN:
1581 if e == E_PAGEDOWN:
1579 changeview(state, +1, b'page')
1582 changeview(state, +1, b'page')
1580 elif e == E_PAGEUP:
1583 elif e == E_PAGEUP:
1581 changeview(state, -1, b'page')
1584 changeview(state, -1, b'page')
1582 elif e == E_LINEDOWN:
1585 elif e == E_LINEDOWN:
1583 changeview(state, +1, b'line')
1586 changeview(state, +1, b'line')
1584 elif e == E_LINEUP:
1587 elif e == E_LINEUP:
1585 changeview(state, -1, b'line')
1588 changeview(state, -1, b'line')
1586
1589
1587 # start rendering
1590 # start rendering
1588 commitwin.erase()
1591 commitwin.erase()
1589 helpwin.erase()
1592 helpwin.erase()
1590 mainwin.erase()
1593 mainwin.erase()
1591 if curmode == MODE_PATCH:
1594 if curmode == MODE_PATCH:
1592 renderpatch(mainwin, state)
1595 renderpatch(mainwin, state)
1593 elif curmode == MODE_HELP:
1596 elif curmode == MODE_HELP:
1594 renderstring(mainwin, state, __doc__.strip().splitlines())
1597 renderstring(mainwin, state, __doc__.strip().splitlines())
1595 else:
1598 else:
1596 renderrules(mainwin, state)
1599 renderrules(mainwin, state)
1597 rendercommit(commitwin, state)
1600 rendercommit(commitwin, state)
1598 renderhelp(helpwin, state)
1601 renderhelp(helpwin, state)
1599 curses.doupdate()
1602 curses.doupdate()
1600 # done rendering
1603 # done rendering
1601 ch = stdscr.getkey()
1604 ch = stdscr.getkey()
1602 except curses.error:
1605 except curses.error:
1603 pass
1606 pass
1604
1607
1605
1608
1606 def _chistedit(ui, repo, *freeargs, **opts):
1609 def _chistedit(ui, repo, *freeargs, **opts):
1607 """interactively edit changeset history via a curses interface
1610 """interactively edit changeset history via a curses interface
1608
1611
1609 Provides an ncurses interface to histedit. Press ? in chistedit mode
1612 Provides an ncurses interface to histedit. Press ? in chistedit mode
1610 to see extensive help. Requires python-curses to be installed."""
1613 to see extensive help. Requires python-curses to be installed."""
1611
1614
1612 if curses is None:
1615 if curses is None:
1613 raise error.Abort(_(b"Python curses library required"))
1616 raise error.Abort(_(b"Python curses library required"))
1614
1617
1615 # disable color
1618 # disable color
1616 ui._colormode = None
1619 ui._colormode = None
1617
1620
1618 try:
1621 try:
1619 keep = opts.get(b'keep')
1622 keep = opts.get(b'keep')
1620 revs = opts.get(b'rev', [])[:]
1623 revs = opts.get(b'rev', [])[:]
1621 cmdutil.checkunfinished(repo)
1624 cmdutil.checkunfinished(repo)
1622 cmdutil.bailifchanged(repo)
1625 cmdutil.bailifchanged(repo)
1623
1626
1624 if os.path.exists(os.path.join(repo.path, b'histedit-state')):
1627 if os.path.exists(os.path.join(repo.path, b'histedit-state')):
1625 raise error.Abort(
1628 raise error.Abort(
1626 _(
1629 _(
1627 b'history edit already in progress, try '
1630 b'history edit already in progress, try '
1628 b'--continue or --abort'
1631 b'--continue or --abort'
1629 )
1632 )
1630 )
1633 )
1631 revs.extend(freeargs)
1634 revs.extend(freeargs)
1632 if not revs:
1635 if not revs:
1633 defaultrev = destutil.desthistedit(ui, repo)
1636 defaultrev = destutil.desthistedit(ui, repo)
1634 if defaultrev is not None:
1637 if defaultrev is not None:
1635 revs.append(defaultrev)
1638 revs.append(defaultrev)
1636 if len(revs) != 1:
1639 if len(revs) != 1:
1637 raise error.Abort(
1640 raise error.Abort(
1638 _(b'histedit requires exactly one ancestor revision')
1641 _(b'histedit requires exactly one ancestor revision')
1639 )
1642 )
1640
1643
1641 rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
1644 rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
1642 if len(rr) != 1:
1645 if len(rr) != 1:
1643 raise error.Abort(
1646 raise error.Abort(
1644 _(
1647 _(
1645 b'The specified revisions must have '
1648 b'The specified revisions must have '
1646 b'exactly one common root'
1649 b'exactly one common root'
1647 )
1650 )
1648 )
1651 )
1649 root = rr[0].node()
1652 root = rr[0].node()
1650
1653
1651 topmost = repo.dirstate.p1()
1654 topmost = repo.dirstate.p1()
1652 revs = between(repo, root, topmost, keep)
1655 revs = between(repo, root, topmost, keep)
1653 if not revs:
1656 if not revs:
1654 raise error.Abort(
1657 raise error.Abort(
1655 _(b'%s is not an ancestor of working directory')
1658 _(b'%s is not an ancestor of working directory')
1656 % node.short(root)
1659 % node.short(root)
1657 )
1660 )
1658
1661
1659 ctxs = []
1662 ctxs = []
1660 for i, r in enumerate(revs):
1663 for i, r in enumerate(revs):
1661 ctxs.append(histeditrule(repo[r], i))
1664 ctxs.append(histeditrule(repo[r], i))
1662 # Curses requires setting the locale or it will default to the C
1665 # Curses requires setting the locale or it will default to the C
1663 # locale. This sets the locale to the user's default system
1666 # locale. This sets the locale to the user's default system
1664 # locale.
1667 # locale.
1665 locale.setlocale(locale.LC_ALL, r'')
1668 locale.setlocale(locale.LC_ALL, r'')
1666 rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs))
1669 rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs))
1667 curses.echo()
1670 curses.echo()
1668 curses.endwin()
1671 curses.endwin()
1669 if rc is False:
1672 if rc is False:
1670 ui.write(_(b"histedit aborted\n"))
1673 ui.write(_(b"histedit aborted\n"))
1671 return 0
1674 return 0
1672 if type(rc) is list:
1675 if type(rc) is list:
1673 ui.status(_(b"performing changes\n"))
1676 ui.status(_(b"performing changes\n"))
1674 rules = makecommands(rc)
1677 rules = makecommands(rc)
1675 filename = repo.vfs.join(b'chistedit')
1678 filename = repo.vfs.join(b'chistedit')
1676 with open(filename, b'w+') as fp:
1679 with open(filename, b'w+') as fp:
1677 for r in rules:
1680 for r in rules:
1678 fp.write(r)
1681 fp.write(r)
1679 opts[b'commands'] = filename
1682 opts[b'commands'] = filename
1680 return _texthistedit(ui, repo, *freeargs, **opts)
1683 return _texthistedit(ui, repo, *freeargs, **opts)
1681 except KeyboardInterrupt:
1684 except KeyboardInterrupt:
1682 pass
1685 pass
1683 return -1
1686 return -1
1684
1687
1685
1688
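# Routing 'hg histedit' through this curses front end is a configuration
# choice; a minimal hgrc sketch (per-feature interface selection, with
# 'text' as the default):
#
#   [ui]
#   interface.histedit = curses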
1686 @command(
1689 @command(
1687 b'histedit',
1690 b'histedit',
1688 [
1691 [
1689 (
1692 (
1690 b'',
1693 b'',
1691 b'commands',
1694 b'commands',
1692 b'',
1695 b'',
1693 _(b'read history edits from the specified file'),
1696 _(b'read history edits from the specified file'),
1694 _(b'FILE'),
1697 _(b'FILE'),
1695 ),
1698 ),
1696 (b'c', b'continue', False, _(b'continue an edit already in progress')),
1699 (b'c', b'continue', False, _(b'continue an edit already in progress')),
1697 (b'', b'edit-plan', False, _(b'edit remaining actions list')),
1700 (b'', b'edit-plan', False, _(b'edit remaining actions list')),
1698 (
1701 (
1699 b'k',
1702 b'k',
1700 b'keep',
1703 b'keep',
1701 False,
1704 False,
1702 _(b"don't strip old nodes after edit is complete"),
1705 _(b"don't strip old nodes after edit is complete"),
1703 ),
1706 ),
1704 (b'', b'abort', False, _(b'abort an edit in progress')),
1707 (b'', b'abort', False, _(b'abort an edit in progress')),
1705 (b'o', b'outgoing', False, _(b'changesets not found in destination')),
1708 (b'o', b'outgoing', False, _(b'changesets not found in destination')),
1706 (
1709 (
1707 b'f',
1710 b'f',
1708 b'force',
1711 b'force',
1709 False,
1712 False,
1710 _(b'force outgoing even for unrelated repositories'),
1713 _(b'force outgoing even for unrelated repositories'),
1711 ),
1714 ),
1712 (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')),
1715 (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')),
1713 ]
1716 ]
1714 + cmdutil.formatteropts,
1717 + cmdutil.formatteropts,
1715 _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
1718 _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
1716 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
1719 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
1717 )
1720 )
1718 def histedit(ui, repo, *freeargs, **opts):
1721 def histedit(ui, repo, *freeargs, **opts):
1719 """interactively edit changeset history
1722 """interactively edit changeset history
1720
1723
1721 This command lets you edit a linear series of changesets (up to
1724 This command lets you edit a linear series of changesets (up to
1722 and including the working directory, which should be clean).
1725 and including the working directory, which should be clean).
1723 You can:
1726 You can:
1724
1727
1725 - `pick` to [re]order a changeset
1728 - `pick` to [re]order a changeset
1726
1729
1727 - `drop` to omit a changeset
1730 - `drop` to omit a changeset
1728
1731
1729 - `mess` to reword the changeset commit message
1732 - `mess` to reword the changeset commit message
1730
1733
1731 - `fold` to combine it with the preceding changeset (using the later date)
1734 - `fold` to combine it with the preceding changeset (using the later date)
1732
1735
1733 - `roll` like fold, but discarding this commit's description and date
1736 - `roll` like fold, but discarding this commit's description and date
1734
1737
1735 - `edit` to edit this changeset (preserving date)
1738 - `edit` to edit this changeset (preserving date)
1736
1739
1737 - `base` to check out a changeset and apply further changesets from there
1740 - `base` to check out a changeset and apply further changesets from there
1738
1741
1739 There are a number of ways to select the root changeset:
1742 There are a number of ways to select the root changeset:
1740
1743
1741 - Specify ANCESTOR directly
1744 - Specify ANCESTOR directly
1742
1745
1743 - Use --outgoing -- it will be the first linear changeset not
1746 - Use --outgoing -- it will be the first linear changeset not
1744 included in destination. (See :hg:`help config.paths.default-push`)
1747 included in destination. (See :hg:`help config.paths.default-push`)
1745
1748
1746 - Otherwise, the value from the "histedit.defaultrev" config option
1749 - Otherwise, the value from the "histedit.defaultrev" config option
1747 is used as a revset to select the base revision when ANCESTOR is not
1750 is used as a revset to select the base revision when ANCESTOR is not
1748 specified. The first revision returned by the revset is used. By
1751 specified. The first revision returned by the revset is used. By
1749 default, this selects the editable history that is unique to the
1752 default, this selects the editable history that is unique to the
1750 ancestry of the working directory.
1753 ancestry of the working directory.
1751
1754
1752 .. container:: verbose
1755 .. container:: verbose
1753
1756
1754 If you use --outgoing, this command will abort if there are ambiguous
1757 If you use --outgoing, this command will abort if there are ambiguous
1755 outgoing revisions, for example if there are multiple branches
1758 outgoing revisions, for example if there are multiple branches
1756 containing outgoing revisions.
1759 containing outgoing revisions.
1757
1760
1758 Use "min(outgoing() and ::.)" or similar revset specification
1761 Use "min(outgoing() and ::.)" or similar revset specification
1759 instead of --outgoing to specify edit target revision exactly in
1762 instead of --outgoing to specify edit target revision exactly in
1760 such ambiguous situation. See :hg:`help revsets` for detail about
1763 such ambiguous situation. See :hg:`help revsets` for detail about
1761 selecting revisions.
1764 selecting revisions.
1762
1765
1763 .. container:: verbose
1766 .. container:: verbose
1764
1767
1765 Examples:
1768 Examples:
1766
1769
1767 - A number of changes have been made.
1770 - A number of changes have been made.
1768 Revision 3 is no longer needed.
1771 Revision 3 is no longer needed.
1769
1772
1770 Start history editing from revision 3::
1773 Start history editing from revision 3::
1771
1774
1772 hg histedit -r 3
1775 hg histedit -r 3
1773
1776
1774 An editor opens, containing the list of revisions,
1777 An editor opens, containing the list of revisions,
1775 with specific actions specified::
1778 with specific actions specified::
1776
1779
1777 pick 5339bf82f0ca 3 Zworgle the foobar
1780 pick 5339bf82f0ca 3 Zworgle the foobar
1778 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1781 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1779 pick 0a9639fcda9d 5 Morgify the cromulancy
1782 pick 0a9639fcda9d 5 Morgify the cromulancy
1780
1783
1781 Additional information about the possible actions
1784 Additional information about the possible actions
1782 to take appears below the list of revisions.
1785 to take appears below the list of revisions.
1783
1786
1784 To remove revision 3 from the history,
1787 To remove revision 3 from the history,
1785 its action (at the beginning of the relevant line)
1788 its action (at the beginning of the relevant line)
1786 is changed to 'drop'::
1789 is changed to 'drop'::
1787
1790
1788 drop 5339bf82f0ca 3 Zworgle the foobar
1791 drop 5339bf82f0ca 3 Zworgle the foobar
1789 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1792 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1790 pick 0a9639fcda9d 5 Morgify the cromulancy
1793 pick 0a9639fcda9d 5 Morgify the cromulancy
1791
1794
1792 - A number of changes have been made.
1795 - A number of changes have been made.
1793 Revisions 2 and 4 need to be swapped.
1796 Revisions 2 and 4 need to be swapped.
1794
1797
1795 Start history editing from revision 2::
1798 Start history editing from revision 2::
1796
1799
1797 hg histedit -r 2
1800 hg histedit -r 2
1798
1801
1799 An editor opens, containing the list of revisions,
1802 An editor opens, containing the list of revisions,
1800 with specific actions specified::
1803 with specific actions specified::
1801
1804
1802 pick 252a1af424ad 2 Blorb a morgwazzle
1805 pick 252a1af424ad 2 Blorb a morgwazzle
1803 pick 5339bf82f0ca 3 Zworgle the foobar
1806 pick 5339bf82f0ca 3 Zworgle the foobar
1804 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1807 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1805
1808
1806 To swap revisions 2 and 4, their lines are swapped
1809 To swap revisions 2 and 4, their lines are swapped
1807 in the editor::
1810 in the editor::
1808
1811
1809 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1812 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1810 pick 5339bf82f0ca 3 Zworgle the foobar
1813 pick 5339bf82f0ca 3 Zworgle the foobar
1811 pick 252a1af424ad 2 Blorb a morgwazzle
1814 pick 252a1af424ad 2 Blorb a morgwazzle
1812
1815
1813 Returns 0 on success, 1 if user intervention is required (not only
1816 Returns 0 on success, 1 if user intervention is required (not only
1814 for intentional "edit" command, but also for resolving unexpected
1817 for intentional "edit" command, but also for resolving unexpected
1815 conflicts).
1818 conflicts).
1816 """
1819 """
1817 # kludge: _chistedit only works for starting an edit, not aborting
1820 # kludge: _chistedit only works for starting an edit, not aborting
1818 # or continuing, so fall back to regular _texthistedit for those
1821 # or continuing, so fall back to regular _texthistedit for those
1819 # operations.
1822 # operations.
1820 if (
1823 if (
1821 ui.interface(b'histedit') == b'curses'
1824 ui.interface(b'histedit') == b'curses'
1822 and _getgoal(pycompat.byteskwargs(opts)) == goalnew
1825 and _getgoal(pycompat.byteskwargs(opts)) == goalnew
1823 ):
1826 ):
1824 return _chistedit(ui, repo, *freeargs, **opts)
1827 return _chistedit(ui, repo, *freeargs, **opts)
1825 return _texthistedit(ui, repo, *freeargs, **opts)
1828 return _texthistedit(ui, repo, *freeargs, **opts)
1826
1829
1827
1830
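# The histedit.defaultrev option mentioned in the docstring above holds a
# revset; a sketch of setting it in hgrc (the value is only an example):
#
#   [histedit]
#   defaultrev = only(.) & draft()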
1828 def _texthistedit(ui, repo, *freeargs, **opts):
1831 def _texthistedit(ui, repo, *freeargs, **opts):
1829 state = histeditstate(repo)
1832 state = histeditstate(repo)
1830 with repo.wlock() as wlock, repo.lock() as lock:
1833 with repo.wlock() as wlock, repo.lock() as lock:
1831 state.wlock = wlock
1834 state.wlock = wlock
1832 state.lock = lock
1835 state.lock = lock
1833 _histedit(ui, repo, state, *freeargs, **opts)
1836 _histedit(ui, repo, state, *freeargs, **opts)
1834
1837
1835
1838
1836 goalcontinue = b'continue'
1839 goalcontinue = b'continue'
1837 goalabort = b'abort'
1840 goalabort = b'abort'
1838 goaleditplan = b'edit-plan'
1841 goaleditplan = b'edit-plan'
1839 goalnew = b'new'
1842 goalnew = b'new'
1840
1843
1841
1844
1842 def _getgoal(opts):
1845 def _getgoal(opts):
1843 if opts.get(b'continue'):
1846 if opts.get(b'continue'):
1844 return goalcontinue
1847 return goalcontinue
1845 if opts.get(b'abort'):
1848 if opts.get(b'abort'):
1846 return goalabort
1849 return goalabort
1847 if opts.get(b'edit_plan'):
1850 if opts.get(b'edit_plan'):
1848 return goaleditplan
1851 return goaleditplan
1849 return goalnew
1852 return goalnew
1850
1853
1851
1854
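# e.g. _getgoal({b'abort': True}) -> b'abort'; an opts dict with none of
# the three flags set falls through to b'new' (start a fresh edit).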
1852 def _readfile(ui, path):
1855 def _readfile(ui, path):
1853 if path == b'-':
1856 if path == b'-':
1854 with ui.timeblockedsection(b'histedit'):
1857 with ui.timeblockedsection(b'histedit'):
1855 return ui.fin.read()
1858 return ui.fin.read()
1856 else:
1859 else:
1857 with open(path, b'rb') as f:
1860 with open(path, b'rb') as f:
1858 return f.read()
1861 return f.read()
1859
1862
1860
1863
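# This is what lets 'hg histedit --commands -' read the edit plan from
# standard input (ui.fin); any other path is opened as a regular file.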
1861 def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs):
1864 def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs):
1862 # TODO only abort if we try to histedit mq patches, not just
1865 # TODO only abort if we try to histedit mq patches, not just
1863 # blanket if mq patches are applied somewhere
1866 # blanket if mq patches are applied somewhere
1864 mq = getattr(repo, 'mq', None)
1867 mq = getattr(repo, 'mq', None)
1865 if mq and mq.applied:
1868 if mq and mq.applied:
1866 raise error.Abort(_(b'source has mq patches applied'))
1869 raise error.Abort(_(b'source has mq patches applied'))
1867
1870
1868 # basic argument incompatibility processing
1871 # basic argument incompatibility processing
1869 outg = opts.get(b'outgoing')
1872 outg = opts.get(b'outgoing')
1870 editplan = opts.get(b'edit_plan')
1873 editplan = opts.get(b'edit_plan')
1871 abort = opts.get(b'abort')
1874 abort = opts.get(b'abort')
1872 force = opts.get(b'force')
1875 force = opts.get(b'force')
1873 if force and not outg:
1876 if force and not outg:
1874 raise error.Abort(_(b'--force only allowed with --outgoing'))
1877 raise error.Abort(_(b'--force only allowed with --outgoing'))
1875 if goal == b'continue':
1878 if goal == b'continue':
1876 if any((outg, abort, revs, freeargs, rules, editplan)):
1879 if any((outg, abort, revs, freeargs, rules, editplan)):
1877 raise error.Abort(_(b'no arguments allowed with --continue'))
1880 raise error.Abort(_(b'no arguments allowed with --continue'))
1878 elif goal == b'abort':
1881 elif goal == b'abort':
1879 if any((outg, revs, freeargs, rules, editplan)):
1882 if any((outg, revs, freeargs, rules, editplan)):
1880 raise error.Abort(_(b'no arguments allowed with --abort'))
1883 raise error.Abort(_(b'no arguments allowed with --abort'))
1881 elif goal == b'edit-plan':
1884 elif goal == b'edit-plan':
1882 if any((outg, revs, freeargs)):
1885 if any((outg, revs, freeargs)):
1883 raise error.Abort(
1886 raise error.Abort(
1884 _(b'only --commands argument allowed with ' b'--edit-plan')
1887 _(b'only --commands argument allowed with ' b'--edit-plan')
1885 )
1888 )
1886 else:
1889 else:
1887 if state.inprogress():
1890 if state.inprogress():
1888 raise error.Abort(
1891 raise error.Abort(
1889 _(
1892 _(
1890 b'history edit already in progress, try '
1893 b'history edit already in progress, try '
1891 b'--continue or --abort'
1894 b'--continue or --abort'
1892 )
1895 )
1893 )
1896 )
1894 if outg:
1897 if outg:
1895 if revs:
1898 if revs:
1896 raise error.Abort(_(b'no revisions allowed with --outgoing'))
1899 raise error.Abort(_(b'no revisions allowed with --outgoing'))
1897 if len(freeargs) > 1:
1900 if len(freeargs) > 1:
1898 raise error.Abort(
1901 raise error.Abort(
1899 _(b'only one repo argument allowed with --outgoing')
1902 _(b'only one repo argument allowed with --outgoing')
1900 )
1903 )
1901 else:
1904 else:
1902 revs.extend(freeargs)
1905 revs.extend(freeargs)
1903 if len(revs) == 0:
1906 if len(revs) == 0:
1904 defaultrev = destutil.desthistedit(ui, repo)
1907 defaultrev = destutil.desthistedit(ui, repo)
1905 if defaultrev is not None:
1908 if defaultrev is not None:
1906 revs.append(defaultrev)
1909 revs.append(defaultrev)
1907
1910
1908 if len(revs) != 1:
1911 if len(revs) != 1:
1909 raise error.Abort(
1912 raise error.Abort(
1910 _(b'histedit requires exactly one ancestor revision')
1913 _(b'histedit requires exactly one ancestor revision')
1911 )
1914 )
1912
1915
1913
1916
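# For instance, 'hg histedit --continue --rev 5' aborts above with
# 'no arguments allowed with --continue', since --continue must resume
# the stored plan unmodified.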
1914 def _histedit(ui, repo, state, *freeargs, **opts):
1917 def _histedit(ui, repo, state, *freeargs, **opts):
1915 opts = pycompat.byteskwargs(opts)
1918 opts = pycompat.byteskwargs(opts)
1916 fm = ui.formatter(b'histedit', opts)
1919 fm = ui.formatter(b'histedit', opts)
1917 fm.startitem()
1920 fm.startitem()
1918 goal = _getgoal(opts)
1921 goal = _getgoal(opts)
1919 revs = opts.get(b'rev', [])
1922 revs = opts.get(b'rev', [])
1920 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
1923 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
1921 rules = opts.get(b'commands', b'')
1924 rules = opts.get(b'commands', b'')
1922 state.keep = opts.get(b'keep', False)
1925 state.keep = opts.get(b'keep', False)
1923
1926
1924 _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)
1927 _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)
1925
1928
1926 hastags = False
1929 hastags = False
1927 if revs:
1930 if revs:
1928 revs = scmutil.revrange(repo, revs)
1931 revs = scmutil.revrange(repo, revs)
1929 ctxs = [repo[rev] for rev in revs]
1932 ctxs = [repo[rev] for rev in revs]
1930 for ctx in ctxs:
1933 for ctx in ctxs:
1931 tags = [tag for tag in ctx.tags() if tag != b'tip']
1934 tags = [tag for tag in ctx.tags() if tag != b'tip']
1932 if not hastags:
1935 if not hastags:
1933 hastags = len(tags)
1936 hastags = len(tags)
1934 if hastags:
1937 if hastags:
1935 if ui.promptchoice(
1938 if ui.promptchoice(
1936 _(
1939 _(
1937 b'warning: tags associated with the given'
1940 b'warning: tags associated with the given'
1938 b' changeset will be lost after histedit.\n'
1941 b' changeset will be lost after histedit.\n'
1939 b'do you want to continue (yN)? $$ &Yes $$ &No'
1942 b'do you want to continue (yN)? $$ &Yes $$ &No'
1940 ),
1943 ),
1941 default=1,
1944 default=1,
1942 ):
1945 ):
1943 raise error.Abort(_(b'histedit cancelled\n'))
1946 raise error.Abort(_(b'histedit cancelled\n'))
1944 # rebuild state
1947 # rebuild state
1945 if goal == goalcontinue:
1948 if goal == goalcontinue:
1946 state.read()
1949 state.read()
1947 state = bootstrapcontinue(ui, state, opts)
1950 state = bootstrapcontinue(ui, state, opts)
1948 elif goal == goaleditplan:
1951 elif goal == goaleditplan:
1949 _edithisteditplan(ui, repo, state, rules)
1952 _edithisteditplan(ui, repo, state, rules)
1950 return
1953 return
1951 elif goal == goalabort:
1954 elif goal == goalabort:
1952 _aborthistedit(ui, repo, state, nobackup=nobackup)
1955 _aborthistedit(ui, repo, state, nobackup=nobackup)
1953 return
1956 return
1954 else:
1957 else:
1955 # goal == goalnew
1958 # goal == goalnew
1956 _newhistedit(ui, repo, state, revs, freeargs, opts)
1959 _newhistedit(ui, repo, state, revs, freeargs, opts)
1957
1960
1958 _continuehistedit(ui, repo, state)
1961 _continuehistedit(ui, repo, state)
1959 _finishhistedit(ui, repo, state, fm)
1962 _finishhistedit(ui, repo, state, fm)
1960 fm.end()
1963 fm.end()
1961
1964
1962
1965
1963 def _continuehistedit(ui, repo, state):
1966 def _continuehistedit(ui, repo, state):
1964 """This function runs after either:
1967 """This function runs after either:
1965 - bootstrapcontinue (if the goal is 'continue')
1968 - bootstrapcontinue (if the goal is 'continue')
1966 - _newhistedit (if the goal is 'new')
1969 - _newhistedit (if the goal is 'new')
1967 """
1970 """
1968 # preprocess rules so that we can hide inner folds from the user
1971 # preprocess rules so that we can hide inner folds from the user
1969 # and only show one editor
1972 # and only show one editor
1970 actions = state.actions[:]
1973 actions = state.actions[:]
1971 for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])):
1974 for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])):
1972 if action.verb == b'fold' and nextact and nextact.verb == b'fold':
1975 if action.verb == b'fold' and nextact and nextact.verb == b'fold':
1973 state.actions[idx].__class__ = _multifold
1976 state.actions[idx].__class__ = _multifold
1974
1977
1975 # Force an initial state file write, so the user can run --abort/continue
1978 # Force an initial state file write, so the user can run --abort/continue
1976 # even if there's an exception before the first transaction serialize.
1979 # even if there's an exception before the first transaction serialize.
1977 state.write()
1980 state.write()
1978
1981
1979 tr = None
1982 tr = None
1980 # Don't use singletransaction by default since it rolls the entire
1983 # Don't use singletransaction by default since it rolls the entire
1981 # transaction back if an unexpected exception happens (like a
1984 # transaction back if an unexpected exception happens (like a
1982 # pretxncommit hook throws, or the user aborts the commit msg editor).
1985 # pretxncommit hook throws, or the user aborts the commit msg editor).
1983 if ui.configbool(b"histedit", b"singletransaction"):
1986 if ui.configbool(b"histedit", b"singletransaction"):
1984 # Don't use a 'with' for the transaction, since actions may close
1987 # Don't use a 'with' for the transaction, since actions may close
1985 # and reopen a transaction. For example, if the action executes an
1988 # and reopen a transaction. For example, if the action executes an
1986 # external process it may choose to commit the transaction first.
1989 # external process it may choose to commit the transaction first.
1987 tr = repo.transaction(b'histedit')
1990 tr = repo.transaction(b'histedit')
1988 progress = ui.makeprogress(
1991 progress = ui.makeprogress(
1989 _(b"editing"), unit=_(b'changes'), total=len(state.actions)
1992 _(b"editing"), unit=_(b'changes'), total=len(state.actions)
1990 )
1993 )
1991 with progress, util.acceptintervention(tr):
1994 with progress, util.acceptintervention(tr):
1992 while state.actions:
1995 while state.actions:
1993 state.write(tr=tr)
1996 state.write(tr=tr)
1994 actobj = state.actions[0]
1997 actobj = state.actions[0]
1995 progress.increment(item=actobj.torule())
1998 progress.increment(item=actobj.torule())
1996 ui.debug(
1999 ui.debug(
1997 b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule())
2000 b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule())
1998 )
2001 )
1999 parentctx, replacement_ = actobj.run()
2002 parentctx, replacement_ = actobj.run()
2000 state.parentctxnode = parentctx.node()
2003 state.parentctxnode = parentctx.node()
2001 state.replacements.extend(replacement_)
2004 state.replacements.extend(replacement_)
2002 state.actions.pop(0)
2005 state.actions.pop(0)
2003
2006
2004 state.write()
2007 state.write()
2005
2008
2006
2009
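# The single-transaction behaviour described above is opt-in; a minimal
# hgrc sketch:
#
#   [histedit]
#   singletransaction = yes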
2007 def _finishhistedit(ui, repo, state, fm):
2010 def _finishhistedit(ui, repo, state, fm):
2008 """This action runs when histedit is finishing its session"""
2011 """This action runs when histedit is finishing its session"""
2009 hg.updaterepo(repo, state.parentctxnode, overwrite=False)
2012 hg.updaterepo(repo, state.parentctxnode, overwrite=False)
2010
2013
2011 mapping, tmpnodes, created, ntm = processreplacement(state)
2014 mapping, tmpnodes, created, ntm = processreplacement(state)
2012 if mapping:
2015 if mapping:
2013 for prec, succs in mapping.iteritems():
2016 for prec, succs in mapping.iteritems():
2014 if not succs:
2017 if not succs:
2015 ui.debug(b'histedit: %s is dropped\n' % node.short(prec))
2018 ui.debug(b'histedit: %s is dropped\n' % node.short(prec))
2016 else:
2019 else:
2017 ui.debug(
2020 ui.debug(
2018 b'histedit: %s is replaced by %s\n'
2021 b'histedit: %s is replaced by %s\n'
2019 % (node.short(prec), node.short(succs[0]))
2022 % (node.short(prec), node.short(succs[0]))
2020 )
2023 )
2021 if len(succs) > 1:
2024 if len(succs) > 1:
2022 m = b'histedit: %s'
2025 m = b'histedit: %s'
2023 for n in succs[1:]:
2026 for n in succs[1:]:
2024 ui.debug(m % node.short(n))
2027 ui.debug(m % node.short(n))
2025
2028
2026 if not state.keep:
2029 if not state.keep:
2027 if mapping:
2030 if mapping:
2028 movetopmostbookmarks(repo, state.topmost, ntm)
2031 movetopmostbookmarks(repo, state.topmost, ntm)
2029 # TODO update mq state
2032 # TODO update mq state
2030 else:
2033 else:
2031 mapping = {}
2034 mapping = {}
2032
2035
2033 for n in tmpnodes:
2036 for n in tmpnodes:
2034 if n in repo:
2037 if n in repo:
2035 mapping[n] = ()
2038 mapping[n] = ()
2036
2039
2037 # remove entries about unknown nodes
2040 # remove entries about unknown nodes
2038 nodemap = repo.unfiltered().changelog.nodemap
2041 nodemap = repo.unfiltered().changelog.nodemap
2039 mapping = {
2042 mapping = {
2040 k: v
2043 k: v
2041 for k, v in mapping.items()
2044 for k, v in mapping.items()
2042 if k in nodemap and all(n in nodemap for n in v)
2045 if k in nodemap and all(n in nodemap for n in v)
2043 }
2046 }
2044 scmutil.cleanupnodes(repo, mapping, b'histedit')
2047 scmutil.cleanupnodes(repo, mapping, b'histedit')
2045 hf = fm.hexfunc
2048 hf = fm.hexfunc
2046 fl = fm.formatlist
2049 fl = fm.formatlist
2047 fd = fm.formatdict
2050 fd = fm.formatdict
2048 nodechanges = fd(
2051 nodechanges = fd(
2049 {
2052 {
2050 hf(oldn): fl([hf(n) for n in newn], name=b'node')
2053 hf(oldn): fl([hf(n) for n in newn], name=b'node')
2051 for oldn, newn in mapping.iteritems()
2054 for oldn, newn in mapping.iteritems()
2052 },
2055 },
2053 key=b"oldnode",
2056 key=b"oldnode",
2054 value=b"newnodes",
2057 value=b"newnodes",
2055 )
2058 )
2056 fm.data(nodechanges=nodechanges)
2059 fm.data(nodechanges=nodechanges)
2057
2060
2058 state.clear()
2061 state.clear()
2059 if os.path.exists(repo.sjoin(b'undo')):
2062 if os.path.exists(repo.sjoin(b'undo')):
2060 os.unlink(repo.sjoin(b'undo'))
2063 os.unlink(repo.sjoin(b'undo'))
2061 if repo.vfs.exists(b'histedit-last-edit.txt'):
2064 if repo.vfs.exists(b'histedit-last-edit.txt'):
2062 repo.vfs.unlink(b'histedit-last-edit.txt')
2065 repo.vfs.unlink(b'histedit-last-edit.txt')
2063
2066
2064
2067
2065 def _aborthistedit(ui, repo, state, nobackup=False):
2068 def _aborthistedit(ui, repo, state, nobackup=False):
2066 try:
2069 try:
2067 state.read()
2070 state.read()
2068 __, leafs, tmpnodes, __ = processreplacement(state)
2071 __, leafs, tmpnodes, __ = processreplacement(state)
2069 ui.debug(b'restore wc to old parent %s\n' % node.short(state.topmost))
2072 ui.debug(b'restore wc to old parent %s\n' % node.short(state.topmost))
2070
2073
2071 # Recover our old commits if necessary
2074 # Recover our old commits if necessary
2072 if not state.topmost in repo and state.backupfile:
2075 if not state.topmost in repo and state.backupfile:
2073 backupfile = repo.vfs.join(state.backupfile)
2076 backupfile = repo.vfs.join(state.backupfile)
2074 f = hg.openpath(ui, backupfile)
2077 f = hg.openpath(ui, backupfile)
2075 gen = exchange.readbundle(ui, f, backupfile)
2078 gen = exchange.readbundle(ui, f, backupfile)
2076 with repo.transaction(b'histedit.abort') as tr:
2079 with repo.transaction(b'histedit.abort') as tr:
2077 bundle2.applybundle(
2080 bundle2.applybundle(
2078 repo,
2081 repo,
2079 gen,
2082 gen,
2080 tr,
2083 tr,
2081 source=b'histedit',
2084 source=b'histedit',
2082 url=b'bundle:' + backupfile,
2085 url=b'bundle:' + backupfile,
2083 )
2086 )
2084
2087
2085 os.remove(backupfile)
2088 os.remove(backupfile)
2086
2089
2087 # check whether we should update away
2090 # check whether we should update away
2088 if repo.unfiltered().revs(
2091 if repo.unfiltered().revs(
2089 b'parents() and (%n or %ln::)',
2092 b'parents() and (%n or %ln::)',
2090 state.parentctxnode,
2093 state.parentctxnode,
2091 leafs | tmpnodes,
2094 leafs | tmpnodes,
2092 ):
2095 ):
2093 hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
2096 hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
2094 cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
2097 cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
2095 cleanupnode(ui, repo, leafs, nobackup=nobackup)
2098 cleanupnode(ui, repo, leafs, nobackup=nobackup)
2096 except Exception:
2099 except Exception:
2097 if state.inprogress():
2100 if state.inprogress():
2098 ui.warn(
2101 ui.warn(
2099 _(
2102 _(
2100 b'warning: encountered an exception during histedit '
2103 b'warning: encountered an exception during histedit '
2101 b'--abort; the repository may not have been completely '
2104 b'--abort; the repository may not have been completely '
2102 b'cleaned up\n'
2105 b'cleaned up\n'
2103 )
2106 )
2104 )
2107 )
2105 raise
2108 raise
2106 finally:
2109 finally:
2107 state.clear()
2110 state.clear()
2108
2111
2109
2112
2110 def hgaborthistedit(ui, repo):
2113 def hgaborthistedit(ui, repo):
2111 state = histeditstate(repo)
2114 state = histeditstate(repo)
2112 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
2115 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
2113 with repo.wlock() as wlock, repo.lock() as lock:
2116 with repo.wlock() as wlock, repo.lock() as lock:
2114 state.wlock = wlock
2117 state.wlock = wlock
2115 state.lock = lock
2118 state.lock = lock
2116 _aborthistedit(ui, repo, state, nobackup=nobackup)
2119 _aborthistedit(ui, repo, state, nobackup=nobackup)
2117
2120
2118
2121
2119 def _edithisteditplan(ui, repo, state, rules):
2122 def _edithisteditplan(ui, repo, state, rules):
2120 state.read()
2123 state.read()
2121 if not rules:
2124 if not rules:
2122 comment = geteditcomment(
2125 comment = geteditcomment(
2123 ui, node.short(state.parentctxnode), node.short(state.topmost)
2126 ui, node.short(state.parentctxnode), node.short(state.topmost)
2124 )
2127 )
2125 rules = ruleeditor(repo, ui, state.actions, comment)
2128 rules = ruleeditor(repo, ui, state.actions, comment)
2126 else:
2129 else:
2127 rules = _readfile(ui, rules)
2130 rules = _readfile(ui, rules)
2128 actions = parserules(rules, state)
2131 actions = parserules(rules, state)
2129 ctxs = [repo[act.node] for act in state.actions if act.node]
2132 ctxs = [repo[act.node] for act in state.actions if act.node]
2130 warnverifyactions(ui, repo, actions, state, ctxs)
2133 warnverifyactions(ui, repo, actions, state, ctxs)
2131 state.actions = actions
2134 state.actions = actions
2132 state.write()
2135 state.write()
2133
2136
2134
2137
2135 def _newhistedit(ui, repo, state, revs, freeargs, opts):
2138 def _newhistedit(ui, repo, state, revs, freeargs, opts):
2136 outg = opts.get(b'outgoing')
2139 outg = opts.get(b'outgoing')
2137 rules = opts.get(b'commands', b'')
2140 rules = opts.get(b'commands', b'')
2138 force = opts.get(b'force')
2141 force = opts.get(b'force')
2139
2142
2140 cmdutil.checkunfinished(repo)
2143 cmdutil.checkunfinished(repo)
2141 cmdutil.bailifchanged(repo)
2144 cmdutil.bailifchanged(repo)
2142
2145
2143 topmost = repo.dirstate.p1()
2146 topmost = repo.dirstate.p1()
2144 if outg:
2147 if outg:
2145 if freeargs:
2148 if freeargs:
2146 remote = freeargs[0]
2149 remote = freeargs[0]
2147 else:
2150 else:
2148 remote = None
2151 remote = None
2149 root = findoutgoing(ui, repo, remote, force, opts)
2152 root = findoutgoing(ui, repo, remote, force, opts)
2150 else:
2153 else:
2151 rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
2154 rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
2152 if len(rr) != 1:
2155 if len(rr) != 1:
2153 raise error.Abort(
2156 raise error.Abort(
2154 _(
2157 _(
2155 b'The specified revisions must have '
2158 b'The specified revisions must have '
2156 b'exactly one common root'
2159 b'exactly one common root'
2157 )
2160 )
2158 )
2161 )
2159 root = rr[0].node()
2162 root = rr[0].node()
2160
2163
2161 revs = between(repo, root, topmost, state.keep)
2164 revs = between(repo, root, topmost, state.keep)
2162 if not revs:
2165 if not revs:
2163 raise error.Abort(
2166 raise error.Abort(
2164 _(b'%s is not an ancestor of working directory') % node.short(root)
2167 _(b'%s is not an ancestor of working directory') % node.short(root)
2165 )
2168 )
2166
2169
2167 ctxs = [repo[r] for r in revs]
2170 ctxs = [repo[r] for r in revs]
2168
2171
2169 wctx = repo[None]
2172 wctx = repo[None]
2170 # Please don't ask me why `ancestors` is this value. I figured it
2173 # Please don't ask me why `ancestors` is this value. I figured it
2171 # out with print-debugging, not by actually understanding what the
2174 # out with print-debugging, not by actually understanding what the
2172 # merge code is doing. :(
2175 # merge code is doing. :(
2173 ancs = [repo[b'.']]
2176 ancs = [repo[b'.']]
2174 # Sniff-test to make sure we won't collide with untracked files in
2177 # Sniff-test to make sure we won't collide with untracked files in
2175 # the working directory. If we don't do this, we can get a
2178 # the working directory. If we don't do this, we can get a
2176 # collision after we've started histedit and backing out gets ugly
2179 # collision after we've started histedit and backing out gets ugly
2177 # for everyone, especially the user.
2180 # for everyone, especially the user.
2178 for c in [ctxs[0].p1()] + ctxs:
2181 for c in [ctxs[0].p1()] + ctxs:
2179 try:
2182 try:
2180 mergemod.calculateupdates(
2183 mergemod.calculateupdates(
2181 repo,
2184 repo,
2182 wctx,
2185 wctx,
2183 c,
2186 c,
2184 ancs,
2187 ancs,
2185 # These parameters were determined by print-debugging
2188 # These parameters were determined by print-debugging
2186 # what happens later on inside histedit.
2189 # what happens later on inside histedit.
2187 branchmerge=False,
2190 branchmerge=False,
2188 force=False,
2191 force=False,
2189 acceptremote=False,
2192 acceptremote=False,
2190 followcopies=False,
2193 followcopies=False,
2191 )
2194 )
2192 except error.Abort:
2195 except error.Abort:
2193 raise error.Abort(
2196 raise error.Abort(
2194 _(
2197 _(
2195 b"untracked files in working directory conflict with files in %s"
2198 b"untracked files in working directory conflict with files in %s"
2196 )
2199 )
2197 % c
2200 % c
2198 )
2201 )
2199
2202
2200 if not rules:
2203 if not rules:
2201 comment = geteditcomment(ui, node.short(root), node.short(topmost))
2204 comment = geteditcomment(ui, node.short(root), node.short(topmost))
2202 actions = [pick(state, r) for r in revs]
2205 actions = [pick(state, r) for r in revs]
2203 rules = ruleeditor(repo, ui, actions, comment)
2206 rules = ruleeditor(repo, ui, actions, comment)
2204 else:
2207 else:
2205 rules = _readfile(ui, rules)
2208 rules = _readfile(ui, rules)
2206 actions = parserules(rules, state)
2209 actions = parserules(rules, state)
2207 warnverifyactions(ui, repo, actions, state, ctxs)
2210 warnverifyactions(ui, repo, actions, state, ctxs)
2208
2211
2209 parentctxnode = repo[root].p1().node()
2212 parentctxnode = repo[root].p1().node()
2210
2213
2211 state.parentctxnode = parentctxnode
2214 state.parentctxnode = parentctxnode
2212 state.actions = actions
2215 state.actions = actions
2213 state.topmost = topmost
2216 state.topmost = topmost
2214 state.replacements = []
2217 state.replacements = []
2215
2218
2216 ui.log(
2219 ui.log(
2217 b"histedit",
2220 b"histedit",
2218 b"%d actions to histedit\n",
2221 b"%d actions to histedit\n",
2219 len(actions),
2222 len(actions),
2220 histedit_num_actions=len(actions),
2223 histedit_num_actions=len(actions),
2221 )
2224 )
2222
2225
2223 # Create a backup so we can always abort completely.
2226 # Create a backup so we can always abort completely.
2224 backupfile = None
2227 backupfile = None
2225 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2228 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2226 backupfile = repair.backupbundle(
2229 backupfile = repair.backupbundle(
2227 repo, [parentctxnode], [topmost], root, b'histedit'
2230 repo, [parentctxnode], [topmost], root, b'histedit'
2228 )
2231 )
2229 state.backupfile = backupfile
2232 state.backupfile = backupfile
2230
2233
2231
2234
2232 def _getsummary(ctx):
2235 def _getsummary(ctx):
2233 # a common pattern is to extract the summary but default to the empty
2236 # a common pattern is to extract the summary but default to the empty
2234 # string
2237 # string
2235 summary = ctx.description() or b''
2238 summary = ctx.description() or b''
2236 if summary:
2239 if summary:
2237 summary = summary.splitlines()[0]
2240 summary = summary.splitlines()[0]
2238 return summary
2241 return summary
2239
2242
2240
2243
2241 def bootstrapcontinue(ui, state, opts):
2244 def bootstrapcontinue(ui, state, opts):
2242 repo = state.repo
2245 repo = state.repo
2243
2246
2244 ms = mergemod.mergestate.read(repo)
2247 ms = mergemod.mergestate.read(repo)
2245 mergeutil.checkunresolved(ms)
2248 mergeutil.checkunresolved(ms)
2246
2249
2247 if state.actions:
2250 if state.actions:
2248 actobj = state.actions.pop(0)
2251 actobj = state.actions.pop(0)
2249
2252
2250 if _isdirtywc(repo):
2253 if _isdirtywc(repo):
2251 actobj.continuedirty()
2254 actobj.continuedirty()
2252 if _isdirtywc(repo):
2255 if _isdirtywc(repo):
2253 abortdirty()
2256 abortdirty()
2254
2257
2255 parentctx, replacements = actobj.continueclean()
2258 parentctx, replacements = actobj.continueclean()
2256
2259
2257 state.parentctxnode = parentctx.node()
2260 state.parentctxnode = parentctx.node()
2258 state.replacements.extend(replacements)
2261 state.replacements.extend(replacements)
2259
2262
2260 return state
2263 return state
2261
2264
2262
2265
def between(repo, old, new, keep):
    """select and validate the set of revisions to edit

    When keep is false, the specified set can't have children."""
    revs = repo.revs(b'%n::%n', old, new)
    if revs and not keep:
        if not obsolete.isenabled(
            repo, obsolete.allowunstableopt
        ) and repo.revs(b'(%ld::) - (%ld)', revs, revs):
            raise error.Abort(
                _(
                    b'can only histedit a changeset together '
                    b'with all its descendants'
                )
            )
        if repo.revs(b'(%ld) and merge()', revs):
            raise error.Abort(_(b'cannot edit history that contains merges'))
        root = repo[revs.first()]  # list is already sorted by repo.revs()
        if not root.mutable():
            raise error.Abort(
                _(b'cannot edit public changeset: %s') % root,
                hint=_(b"see 'hg help phases' for details"),
            )
    return pycompat.maplist(repo.changelog.node, revs)


def ruleeditor(repo, ui, actions, editcomment=b""):
    """open an editor to edit rules

    rules are in the format [ [act, ctx], ...] like in state.rules
    """
    if repo.ui.configbool(b"experimental", b"histedit.autoverb"):
        newact = util.sortdict()
        for act in actions:
            ctx = repo[act.node]
            summary = _getsummary(ctx)
            fword = summary.split(b' ', 1)[0].lower()
            added = False

            # if it doesn't end with the special character '!' just skip this
            if fword.endswith(b'!'):
                fword = fword[:-1]
                if fword in primaryactions | secondaryactions | tertiaryactions:
                    act.verb = fword
                    # get the target summary
                    tsum = summary[len(fword) + 1 :].lstrip()
                    # safe but slow: reverse iterate over the actions so we
                    # don't clash on two commits having the same summary
                    for na, l in reversed(list(newact.iteritems())):
                        actx = repo[na.node]
                        asum = _getsummary(actx)
                        if asum == tsum:
                            added = True
                            l.append(act)
                            break

            if not added:
                newact[act] = []

        # copy over and flatten the new list
        actions = []
        for na, l in newact.iteritems():
            actions.append(na)
            actions += l

    rules = b'\n'.join([act.torule() for act in actions])
    rules += b'\n\n'
    rules += editcomment
    rules = ui.edit(
        rules,
        ui.username(),
        {b'prefix': b'histedit'},
        repopath=repo.path,
        action=b'histedit',
    )

    # Save edit rules in .hg/histedit-last-edit.txt in case
    # the user needs to ask for help after something
    # surprising happens.
    with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:
        f.write(rules)

    return rules


def parserules(rules, state):
    """Read the histedit rules string and return a list of action objects"""
    rules = [
        l
        for l in (r.strip() for r in rules.splitlines())
        if l and not l.startswith(b'#')
    ]
    actions = []
    for r in rules:
        if b' ' not in r:
            raise error.ParseError(_(b'malformed line "%s"') % r)
        verb, rest = r.split(b' ', 1)

        if verb not in actiontable:
            raise error.ParseError(_(b'unknown action "%s"') % verb)

        action = actiontable[verb].fromrule(state, rest)
        actions.append(action)
    return actions
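
# An illustrative sketch of the plan text parserules() consumes (the hashes
# and summaries below are hypothetical). Blank lines and '#' comments are
# dropped; every remaining line must be "verb rest", e.g.:
#
#     pick c561b4e977df 1 add alpha
#     fold 7c2fd3b9020c 2 add beta
#     drop 500cac37a696 3 temporary hack
#
# A line without a space raises ParseError('malformed line ...'), and a verb
# missing from actiontable raises ParseError('unknown action ...').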


def warnverifyactions(ui, repo, actions, state, ctxs):
    try:
        verifyactions(actions, state, ctxs)
    except error.ParseError:
        if repo.vfs.exists(b'histedit-last-edit.txt'):
            ui.warn(
                _(
                    b'warning: histedit rules saved '
                    b'to: .hg/histedit-last-edit.txt\n'
                )
            )
        raise


def verifyactions(actions, state, ctxs):
    """Verify that there exists exactly one action per given changeset and
    other constraints.

    Will abort if there are too many or too few rules, a malformed rule,
    or a rule on a changeset outside of the user-given range.
    """
    expected = set(c.node() for c in ctxs)
    seen = set()
    prev = None

    if actions and actions[0].verb in [b'roll', b'fold']:
        raise error.ParseError(
            _(b'first changeset cannot use verb "%s"') % actions[0].verb
        )

    for action in actions:
        action.verify(prev, expected, seen)
        prev = action
        if action.node is not None:
            seen.add(action.node)
    missing = sorted(expected - seen)  # sort to stabilize output

    if state.repo.ui.configbool(b'histedit', b'dropmissing'):
        if len(actions) == 0:
            raise error.ParseError(
                _(b'no rules provided'),
                hint=_(b'use strip extension to remove commits'),
            )

        drops = [drop(state, n) for n in missing]
        # put them at the beginning so they execute immediately and
        # don't show in the edit-plan in the future
        actions[:0] = drops
    elif missing:
        raise error.ParseError(
            _(b'missing rules for changeset %s') % node.short(missing[0]),
            hint=_(
                b'use "drop %s" to discard, see also: '
                b"'hg help -e histedit.config'"
            )
            % node.short(missing[0]),
        )
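
# Behavior sketch for the two branches above: if a rule for one of the
# edited changesets is deleted from the plan, histedit aborts with "missing
# rules for changeset ..." by default, but with the following setting it
# silently prepends a "drop" action for each omitted changeset:
#
#     [histedit]
#     dropmissing = true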


def adjustreplacementsfrommarkers(repo, oldreplacements):
    """Adjust replacements from obsolescence markers

    Replacements structure is originally generated based on
    histedit's state and does not account for changes that are
    not recorded there. This function fixes that by adding
    data read from obsolescence markers"""
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        return oldreplacements

    unfi = repo.unfiltered()
    nm = unfi.changelog.nodemap
    obsstore = repo.obsstore
    newreplacements = list(oldreplacements)
    oldsuccs = [r[1] for r in oldreplacements]
    # successors that have already been added to succstocheck once
    seensuccs = set().union(
        *oldsuccs
    )  # create a set from an iterable of tuples
    succstocheck = list(seensuccs)
    while succstocheck:
        n = succstocheck.pop()
        missing = nm.get(n) is None
        markers = obsstore.successors.get(n, ())
        if missing and not markers:
            # dead end, mark it as such
            newreplacements.append((n, ()))
        for marker in markers:
            nsuccs = marker[1]
            newreplacements.append((n, nsuccs))
            for nsucc in nsuccs:
                if nsucc not in seensuccs:
                    seensuccs.add(nsucc)
                    succstocheck.append(nsucc)

    return newreplacements
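
# A minimal, self-contained sketch of the successor walk above, using plain
# strings instead of binary nodes and a dict instead of the obsstore. The
# helper name and data shapes are hypothetical, for illustration only:
def _example_walksuccessors(knownnodes, markersbynode, oldreplacements):
    newreplacements = list(oldreplacements)
    # seed the worklist with every successor recorded so far
    seen = set().union(*(succs for _prec, succs in oldreplacements))
    tocheck = list(seen)
    while tocheck:
        n = tocheck.pop()
        markers = markersbynode.get(n, ())
        if n not in knownnodes and not markers:
            newreplacements.append((n, ()))  # dead end
        for nsuccs in markers:
            newreplacements.append((n, nsuccs))
            for nsucc in nsuccs:
                if nsucc not in seen:
                    seen.add(nsucc)
                    tocheck.append(nsucc)
    return newreplacements
# e.g. _example_walksuccessors({'c'}, {'b': (('c',),)}, [('a', ('b',))])
# returns [('a', ('b',)), ('b', ('c',))].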


def processreplacement(state):
    """process the list of replacements to return

    1) the final mapping between original and created nodes
    2) the list of temporary nodes created by histedit
    3) the list of new commits created by histedit"""
    replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
    allsuccs = set()
    replaced = set()
    fullmapping = {}
    # initialize basic set
    # fullmapping records all operations recorded in replacements
    for rep in replacements:
        allsuccs.update(rep[1])
        replaced.add(rep[0])
        fullmapping.setdefault(rep[0], set()).update(rep[1])
    new = allsuccs - replaced
    tmpnodes = allsuccs & replaced
    # Reduce fullmapping into a direct relation between original nodes
    # and the final nodes created during history editing.
    # Dropped changesets are replaced by an empty list
    toproceed = set(fullmapping)
    final = {}
    while toproceed:
        for x in list(toproceed):
            succs = fullmapping[x]
            for s in list(succs):
                if s in toproceed:
                    # non final node with unknown closure
                    # We can't process this now
                    break
                elif s in final:
                    # non final node, replace with closure
                    succs.remove(s)
                    succs.update(final[s])
            else:
                final[x] = succs
                toproceed.remove(x)
    # remove tmpnodes from final mapping
    for n in tmpnodes:
        del final[n]
    # we expect all changes involved in final to exist in the repo
    # turn `final` into list (topologically sorted)
    nm = state.repo.changelog.nodemap
    for prec, succs in final.items():
        final[prec] = sorted(succs, key=nm.get)

    # compute the topmost element (necessary for bookmark)
    if new:
        newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
    elif not final:
        # Nothing rewritten at all. We won't need `newtopmost`:
        # it is the same as `oldtopmost` and `processreplacement` knows it
        newtopmost = None
    else:
        # everybody died. The newtopmost is the parent of the root.
        r = state.repo.changelog.rev
        newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()

    return final, tmpnodes, new, newtopmost
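
# Worked example with hypothetical nodes: replacements [(a, (b,)), (b, (c,))]
# give fullmapping {a: {b}, b: {c}}. b is both replaced and a successor, so
# tmpnodes == {b}; the while loop substitutes b's closure into a, yielding
# final == {a: [c]} once b is removed, and new == {c} supplies newtopmost.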


def movetopmostbookmarks(repo, oldtopmost, newtopmost):
    """Move bookmark from oldtopmost to newly created topmost

    This is arguably a feature and we may only want that for the active
    bookmark. But the behavior is kept compatible with the old version for now.
    """
    if not oldtopmost or not newtopmost:
        return
    oldbmarks = repo.nodebookmarks(oldtopmost)
    if oldbmarks:
        with repo.lock(), repo.transaction(b'histedit') as tr:
            marks = repo._bookmarks
            changes = []
            for name in oldbmarks:
                changes.append((name, newtopmost))
            marks.applychanges(repo, tr, changes)


def cleanupnode(ui, repo, nodes, nobackup=False):
    """strip a group of nodes from the repository

    The set of nodes to strip may contain unknown nodes."""
    with repo.lock():
        # do not let filtering get in the way of the cleanse
        # we should probably get rid of obsolescence markers created during
        # the histedit, but we currently do not have such information.
        repo = repo.unfiltered()
        # Find all nodes that need to be stripped
        # (we use %lr instead of %ln to silently ignore unknown items)
        nm = repo.changelog.nodemap
        nodes = sorted(n for n in nodes if n in nm)
        roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
        if roots:
            backup = not nobackup
            repair.strip(ui, repo, roots, backup=backup)


def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    state = histeditstate(repo)
    if state.inprogress():
        state.read()
        histedit_nodes = {
            action.node for action in state.actions if action.node
        }
        common_nodes = histedit_nodes & set(nodelist)
        if common_nodes:
            raise error.Abort(
                _(b"histedit in progress, can't strip %s")
                % b', '.join(node.short(x) for x in common_nodes)
            )
    return orig(ui, repo, nodelist, *args, **kwargs)


extensions.wrapfunction(repair, b'strip', stripwrapper)


def summaryhook(ui, repo):
    state = histeditstate(repo)
    if not state.inprogress():
        return
    state.read()
    if state.actions:
        # i18n: column positioning for "hg summary"
        ui.write(
            _(b'hist: %s (histedit --continue)\n')
            % (
                ui.label(_(b'%d remaining'), b'histedit.remaining')
                % len(state.actions)
            )
        )


def extsetup(ui):
    cmdutil.summaryhooks.add(b'histedit', summaryhook)
    statemod.addunfinished(
        b'histedit',
        fname=b'histedit-state',
        allowcommit=True,
        continueflag=True,
        abortfunc=hgaborthistedit,
    )
@@ -1,1334 +1,1337 @@
# Infinite push
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
""" store some pushes in a remote blob store on the server (EXPERIMENTAL)

    [infinitepush]
    # Server-side and client-side option. Pattern of the infinitepush bookmark
    branchpattern = PATTERN

    # Server or client
    server = False

    # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
    indextype = disk

    # Server-side option. Used only if indextype=sql.
    # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
    sqlhost = IP:PORT:DB_NAME:USER:PASSWORD

    # Server-side option. Used only if indextype=disk.
    # Filesystem path to the index store
    indexpath = PATH

    # Server-side option. Possible values: 'disk' or 'external'
    # Fails if not set
    storetype = disk

    # Server-side option.
    # Path to the binary that will save bundle to the bundlestore
    # Formatted cmd line will be passed to it (see `put_args`)
    put_binary = put

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for put binary. Placeholder: {filename}
    put_args = {filename}

    # Server-side option.
    # Path to the binary that gets bundles from the bundlestore.
    # Formatted cmd line will be passed to it (see `get_args`)
    get_binary = get

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for get binary. Placeholders: {filename} {handle}
    get_args = {filename} {handle}

    # Server-side option
    logfile = FILE

    # Server-side option
    loglevel = DEBUG

    # Server-side option. Used only if indextype=sql.
    # Sets mysql wait_timeout option.
    waittimeout = 300

    # Server-side option. Used only if indextype=sql.
    # Sets mysql innodb_lock_wait_timeout option.
    locktimeout = 120

    # Server-side option. Used only if indextype=sql.
    # Name of the repository
    reponame = ''

    # Client-side option. Used by --list-remote option. List of remote scratch
    # patterns to list if no patterns are specified.
    defaultremotepatterns = ['*']

    # Instructs infinitepush to forward all received bundle2 parts to the
    # bundle for storage. Defaults to False.
    storeallparts = True

    # Routes each incoming push to the bundlestore. Defaults to False.
    pushtobundlestore = True

    [remotenames]
    # Client-side option
    # This option should be set only if remotenames extension is enabled.
    # Whether remote bookmarks are tracked by remotenames extension.
    bookmarks = True
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import functools
import logging
import os
import random
import re
import socket
import subprocess
import time

from mercurial.node import (
    bin,
    hex,
)

from mercurial.i18n import _

-from mercurial.pycompat import open
+from mercurial.pycompat import (
+    getattr,
+    open,
+)

from mercurial.utils import (
    procutil,
    stringutil,
)

from mercurial import (
    bundle2,
    changegroup,
    commands,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    phases,
    pushkey,
    pycompat,
    registrar,
    util,
    wireprototypes,
    wireprotov1peer,
    wireprotov1server,
)

from . import (
    bundleparts,
    common,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'infinitepush', b'server', default=False,
)
configitem(
    b'infinitepush', b'storetype', default=b'',
)
configitem(
    b'infinitepush', b'indextype', default=b'',
)
configitem(
    b'infinitepush', b'indexpath', default=b'',
)
configitem(
    b'infinitepush', b'storeallparts', default=False,
)
configitem(
    b'infinitepush', b'reponame', default=b'',
)
configitem(
    b'scratchbranch', b'storepath', default=b'',
)
configitem(
    b'infinitepush', b'branchpattern', default=b'',
)
configitem(
    b'infinitepush', b'pushtobundlestore', default=False,
)
configitem(
    b'experimental', b'server-bundlestore-bookmark', default=b'',
)
configitem(
    b'experimental', b'infinitepush-scratchpush', default=False,
)

experimental = b'experimental'
configbookmark = b'server-bundlestore-bookmark'
configscratchpush = b'infinitepush-scratchpush'

scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
_scratchbranchmatcher = lambda x: False
_maybehash = re.compile(r'^[a-f0-9]+$').search


def _buildexternalbundlestore(ui):
    put_args = ui.configlist(b'infinitepush', b'put_args', [])
    put_binary = ui.config(b'infinitepush', b'put_binary')
    if not put_binary:
        raise error.Abort(b'put binary is not specified')
    get_args = ui.configlist(b'infinitepush', b'get_args', [])
    get_binary = ui.config(b'infinitepush', b'get_binary')
    if not get_binary:
        raise error.Abort(b'get binary is not specified')
    from . import store

    return store.externalbundlestore(put_binary, put_args, get_binary, get_args)


def _buildsqlindex(ui):
    sqlhost = ui.config(b'infinitepush', b'sqlhost')
    if not sqlhost:
        raise error.Abort(_(b'please set infinitepush.sqlhost'))
    host, port, db, user, password = sqlhost.split(b':')
    reponame = ui.config(b'infinitepush', b'reponame')
    if not reponame:
        raise error.Abort(_(b'please set infinitepush.reponame'))

    logfile = ui.config(b'infinitepush', b'logfile', b'')
    waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
    locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
    from . import sqlindexapi

    return sqlindexapi.sqlindexapi(
        reponame,
        host,
        port,
        db,
        user,
        password,
        logfile,
        _getloglevel(ui),
        waittimeout=waittimeout,
        locktimeout=locktimeout,
    )


def _getloglevel(ui):
    loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
    numeric_loglevel = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_loglevel, int):
        raise error.Abort(_(b'invalid log level %s') % loglevel)
    return numeric_loglevel
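
# For example, the default of DEBUG resolves through
# getattr(logging, 'DEBUG', None) to logging.DEBUG (10), while a misspelled
# level name resolves to None and triggers the abort above.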


def _tryhoist(ui, remotebookmark):
    '''returns a bookmark with the hoisted part removed

    The remotenames extension has a 'hoist' config that allows using remote
    bookmarks without specifying the remote path. For example, 'hg update
    master' works as well as 'hg update remote/master'. We want to allow the
    same in infinitepush.
    '''

    if common.isremotebooksenabled(ui):
        hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
        if remotebookmark.startswith(hoist):
            return remotebookmark[len(hoist) :]
    return remotebookmark
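
# e.g. assuming remotenames.hoistedpeer is b'default', _tryhoist turns
# b'default/master' into b'master' and leaves b'otherpeer/master' unchanged.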


class bundlestore(object):
    def __init__(self, repo):
        self._repo = repo
        storetype = self._repo.ui.config(b'infinitepush', b'storetype')
        if storetype == b'disk':
            from . import store

            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == b'external':
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _(b'unknown infinitepush store type specified %s') % storetype
            )

        indextype = self._repo.ui.config(b'infinitepush', b'indextype')
        if indextype == b'disk':
            from . import fileindexapi

            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == b'sql':
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _(b'unknown infinitepush index type specified %s') % indextype
            )


def _isserver(ui):
    return ui.configbool(b'infinitepush', b'server')


def reposetup(ui, repo):
    if _isserver(ui) and repo.local():
        repo.bundlestore = bundlestore(repo)


def extsetup(ui):
    commonsetup(ui)
    if _isserver(ui):
        serverextsetup(ui)
    else:
        clientextsetup(ui)


def commonsetup(ui):
    wireprotov1server.commands[b'listkeyspatterns'] = (
        wireprotolistkeyspatterns,
        b'namespace patterns',
    )
    scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
    if scratchbranchpat:
        global _scratchbranchmatcher
        kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
            scratchbranchpat
        )


def serverextsetup(ui):
    origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)

    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler

    orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
        orighandlephasehandler, *args, **kwargs
    )
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler

    extensions.wrapfunction(
        localrepo.localrepository, b'listkeys', localrepolistkeys
    )
    wireprotov1server.commands[b'lookup'] = (
        _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
        b'key',
    )
    extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, b'processparts', processparts)


def clientextsetup(ui):
    entry = extensions.wrapcommand(commands.table, b'push', _push)

    entry[1].append(
        (
            b'',
            b'bundle-store',
            None,
            _(b'force push to go to bundle store (EXPERIMENTAL)'),
        )
    )

    extensions.wrapcommand(commands.table, b'pull', _pull)

    extensions.wrapfunction(discovery, b'checkheads', _checkheads)

    wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns

    partorder = exchange.b2partsgenorder
    index = partorder.index(b'changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype))
    )


def _checkheads(orig, pushop):
    if pushop.ui.configbool(experimental, configscratchpush, False):
        return
    return orig(pushop)


def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    patterns = wireprototypes.decodelist(patterns)
    d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
    return pushkey.encodekeys(d)


def localrepolistkeys(orig, self, namespace, patterns=None):
    if namespace == b'bookmarks' and patterns:
        index = self.bundlestore.index
        results = {}
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith(b'*'):
                pattern = b're:^' + pattern[:-1] + b'.*'
            kind, pat, matcher = stringutil.stringmatcher(pattern)
            for bookmark, node in bookmarks.iteritems():
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)
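
# Pattern sketch: for a glob such as b'infinitepush/*', scratch bookmarks
# come straight from the index, then the pattern is rewritten to
# b're:^infinitepush/.*' so the ordinary repository bookmarks can be matched
# against the same request.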


@wireprotov1peer.batchable
def listkeyspatterns(self, namespace, patterns):
    if not self.capable(b'pushkey'):
        yield {}, None
    f = wireprotov1peer.future()
    self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
    yield {
        b'namespace': encoding.fromlocal(namespace),
        b'patterns': wireprototypes.encodelist(patterns),
    }, f
    d = f.value
    self.ui.debug(
        b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
    )
    yield pushkey.decodekeys(d)
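
# batchable protocol sketch: the generator above first yields the encoded
# wire arguments together with a future; after the peer fills f.value with
# the raw response, the second yield hands back the decoded bookmark dict.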


def _readbundlerevs(bundlerepo):
    return list(bundlerepo.revs(b'bundle()'))


def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
    '''Tells remotefilelog to include all changed files in the changegroup

    By default remotefilelog doesn't include file content in the changegroup.
    But we need to include it if we are fetching from the bundlestore.
    '''
    changedfiles = set()
    cl = bundlerepo.changelog
    for r in bundlerevs:
        # [3] means changed files
        changedfiles.update(cl.read(r)[3])
    if not changedfiles:
        return bundlecaps

    changedfiles = b'\0'.join(changedfiles)
    newcaps = []
    appended = False
    for cap in bundlecaps or []:
        if cap.startswith(b'excludepattern='):
            newcaps.append(b'\0'.join((cap, changedfiles)))
            appended = True
        else:
            newcaps.append(cap)
    if not appended:
        # no excludepattern cap was found, just append one
        newcaps.append(b'excludepattern=' + changedfiles)

    return newcaps
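
# Cap-encoding example (hypothetical file names): with changed files
# {b'a.txt', b'b.txt'}, an existing cap b'excludepattern=foo' becomes
# b'excludepattern=foo\0a.txt\0b.txt'; when no such cap exists,
# b'excludepattern=a.txt\0b.txt' is appended instead.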


def _rebundle(bundlerepo, bundleroots, unknownhead):
    '''
    A bundle may include more revisions than the user requested. For example,
    the user asks for one revision but the bundle also contains its
    descendants. This function filters out all revisions the user did not
    request.
    '''
    parts = []

    version = b'02'
    outgoing = discovery.outgoing(
        bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
    )
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
    cgpart.addparam(b'version', version)
    parts.append(cgpart)

    return parts


def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
    cl = bundlerepo.changelog
    bundleroots = []
    for rev in bundlerevs:
        node = cl.node(rev)
        parents = cl.parents(node)
        for parent in parents:
            # include all revs that exist in the main repo
            # to make sure that the bundle can be applied client-side
            if parent in oldrepo:
                bundleroots.append(parent)
    return bundleroots


def _needsrebundling(head, bundlerepo):
    bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
    return not (
        len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
    )


def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''generates the bundle that will be sent to the user

    returns a tuple with the raw bundle string and the bundle type
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        with util.posixfile(bundlefile, b"rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart(
                    b'changegroup', data=unbundler._stream.read()
                )
                part.addparam(b'version', b'01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == b'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        b'unexpected bundle without changegroup part, '
                        + b'head: %s' % hex(head),
                        hint=b'report to administrator',
                    )
            else:
                raise error.Abort(b'unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts


532 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
535 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
533 heads = heads or []
536 heads = heads or []
534 # newheads are parents of roots of scratch bundles that were requested
537 # newheads are parents of roots of scratch bundles that were requested
535 newphases = {}
538 newphases = {}
536 scratchbundles = []
539 scratchbundles = []
537 newheads = []
540 newheads = []
538 scratchheads = []
541 scratchheads = []
539 nodestobundle = {}
542 nodestobundle = {}
540 allbundlestocleanup = []
543 allbundlestocleanup = []
541 try:
544 try:
542 for head in heads:
545 for head in heads:
543 if head not in repo.changelog.nodemap:
546 if head not in repo.changelog.nodemap:
544 if head not in nodestobundle:
547 if head not in nodestobundle:
545 newbundlefile = common.downloadbundle(repo, head)
548 newbundlefile = common.downloadbundle(repo, head)
546 bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
549 bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
547 bundlerepo = hg.repository(repo.ui, bundlepath)
550 bundlerepo = hg.repository(repo.ui, bundlepath)
548
551
549 allbundlestocleanup.append((bundlerepo, newbundlefile))
552 allbundlestocleanup.append((bundlerepo, newbundlefile))
550 bundlerevs = set(_readbundlerevs(bundlerepo))
553 bundlerevs = set(_readbundlerevs(bundlerepo))
551 bundlecaps = _includefilelogstobundle(
554 bundlecaps = _includefilelogstobundle(
552 bundlecaps, bundlerepo, bundlerevs, repo.ui
555 bundlecaps, bundlerepo, bundlerevs, repo.ui
553 )
556 )
554 cl = bundlerepo.changelog
557 cl = bundlerepo.changelog
555 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
558 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
556 for rev in bundlerevs:
559 for rev in bundlerevs:
557 node = cl.node(rev)
560 node = cl.node(rev)
558 newphases[hex(node)] = str(phases.draft)
561 newphases[hex(node)] = str(phases.draft)
559 nodestobundle[node] = (
562 nodestobundle[node] = (
560 bundlerepo,
563 bundlerepo,
561 bundleroots,
564 bundleroots,
562 newbundlefile,
565 newbundlefile,
563 )
566 )
564
567
565 scratchbundles.append(
568 scratchbundles.append(
566 _generateoutputparts(head, *nodestobundle[head])
569 _generateoutputparts(head, *nodestobundle[head])
567 )
570 )
568 newheads.extend(bundleroots)
571 newheads.extend(bundleroots)
569 scratchheads.append(head)
572 scratchheads.append(head)
570 finally:
573 finally:
571 for bundlerepo, bundlefile in allbundlestocleanup:
574 for bundlerepo, bundlefile in allbundlestocleanup:
572 bundlerepo.close()
575 bundlerepo.close()
573 try:
576 try:
574 os.unlink(bundlefile)
577 os.unlink(bundlefile)
575 except (IOError, OSError):
578 except (IOError, OSError):
576 # if we can't cleanup the file then just ignore the error,
579 # if we can't cleanup the file then just ignore the error,
577 # no need to fail
580 # no need to fail
578 pass
581 pass
579
582
580 pullfrombundlestore = bool(scratchbundles)
583 pullfrombundlestore = bool(scratchbundles)
581 wrappedchangegrouppart = False
584 wrappedchangegrouppart = False
582 wrappedlistkeys = False
585 wrappedlistkeys = False
583 oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
586 oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
584 try:
587 try:
585
588
586 def _changegrouppart(bundler, *args, **kwargs):
589 def _changegrouppart(bundler, *args, **kwargs):
587 # Order is important here. First add non-scratch part
590 # Order is important here. First add non-scratch part
588 # and only then add parts with scratch bundles because
591 # and only then add parts with scratch bundles because
589 # non-scratch part contains parents of roots of scratch bundles.
592 # non-scratch part contains parents of roots of scratch bundles.
590 result = oldchangegrouppart(bundler, *args, **kwargs)
593 result = oldchangegrouppart(bundler, *args, **kwargs)
591 for bundle in scratchbundles:
594 for bundle in scratchbundles:
592 for part in bundle:
595 for part in bundle:
593 bundler.addpart(part)
596 bundler.addpart(part)
594 return result
597 return result
595
598
596 exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
599 exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
597 wrappedchangegrouppart = True
600 wrappedchangegrouppart = True
598
601
599 def _listkeys(orig, self, namespace):
602 def _listkeys(orig, self, namespace):
600 origvalues = orig(self, namespace)
603 origvalues = orig(self, namespace)
601 if namespace == b'phases' and pullfrombundlestore:
604 if namespace == b'phases' and pullfrombundlestore:
602 if origvalues.get(b'publishing') == b'True':
605 if origvalues.get(b'publishing') == b'True':
603 # Make repo non-publishing to preserve draft phase
606 # Make repo non-publishing to preserve draft phase
604 del origvalues[b'publishing']
607 del origvalues[b'publishing']
605 origvalues.update(newphases)
608 origvalues.update(newphases)
606 return origvalues
609 return origvalues
607
610
608 extensions.wrapfunction(
611 extensions.wrapfunction(
609 localrepo.localrepository, b'listkeys', _listkeys
612 localrepo.localrepository, b'listkeys', _listkeys
610 )
613 )
611 wrappedlistkeys = True
614 wrappedlistkeys = True
612 heads = list((set(newheads) | set(heads)) - set(scratchheads))
615 heads = list((set(newheads) | set(heads)) - set(scratchheads))
613 result = orig(
616 result = orig(
614 repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
617 repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
615 )
618 )
616 finally:
619 finally:
617 if wrappedchangegrouppart:
620 if wrappedchangegrouppart:
618 exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
621 exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
619 if wrappedlistkeys:
622 if wrappedlistkeys:
620 extensions.unwrapfunction(
623 extensions.unwrapfunction(
621 localrepo.localrepository, b'listkeys', _listkeys
624 localrepo.localrepository, b'listkeys', _listkeys
622 )
625 )
623 return result
626 return result
624
627
625
628
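# Illustrative sketch (assumed wiring, not shown in this hunk): because
# getbundlechunks takes the original function as its first argument, it is
# presumably installed via extensions.wrapfunction, e.g.
#
#     extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
#
# after which a pull that requests scratch heads transparently mixes parts
# from the bundle store into the reply.

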
def _lookupwrap(orig):
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return b"%d %s\n" % (1, scratchnode)
            else:
                return b"%d %s\n" % (
                    0,
                    b'scratch branch %s not found' % localkey,
                )
        else:
            try:
                r = hex(repo.lookup(localkey))
                return b"%d %s\n" % (1, r)
            except Exception as inst:
                if repo.bundlestore.index.getbundle(localkey):
                    return b"%d %s\n" % (1, localkey)
                else:
                    r = stringutil.forcebytestr(inst)
                    return b"%d %s\n" % (0, r)

    return _lookup


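# Note on the response format (added for clarity): _lookup answers in the
# classic `lookup` wire protocol shape, a single line b"<success> <payload>\n",
# e.g. b"1 <hexnode>\n" when the key resolves and b"0 <error message>\n" when
# it does not.

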
def _pull(orig, ui, repo, source=b"default", **opts):
    opts = pycompat.byteskwargs(opts)
    # Copy-paste from the `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get(b'rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get(b'bookmark'):
        bookmarks = []
        revs = opts.get(b'rev') or []
        for bookmark in opts.get(b'bookmark'):
            if _scratchbranchmatcher(bookmark):
                # the rev is not known yet;
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = b'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                b'bookmarks', patterns=scratchbookmarks
            )
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort(
                        b'remote bookmark %s not found!' % bookmark
                    )
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        opts[b'bookmark'] = bookmarks
        opts[b'rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(
            discovery, b'findcommonincoming', _findcommonincoming
        )
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the pull and restore them
        # after.
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): a race condition is possible
        # if scratch bookmarks were updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, b'findcommonincoming')


def _readscratchremotebookmarks(ui, repo, other):
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find(b'remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure they are up to date.
        # It seems that `repo.names['remotebookmarks']` may return stale
        # bookmarks, which results in scratch bookmarks being deleted.
        # Our best guess at a fix is to use `clearnames()`.
        repo._remotenames.clearnames()
        for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names[b'remotebookmarks'].nodes(
                    repo, remotebookmark
                )
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}


def _saveremotebookmarks(repo, newbookmarks, remote):
    remotenamesext = extensions.find(b'remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == b'bookmarks':
            if rname in newbookmarks:
                # This is possible if we have a normal bookmark that matches
                # the scratch branch pattern. In this case just use the
                # current bookmark node.
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == b'branches':
            # saveremotenames expects 20-byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in newbookmarks.iteritems():
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)


def _savelocalbookmarks(repo, bookmarks):
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
        changes = []
        for scratchbook, node in bookmarks.iteritems():
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)


def _findcommonincoming(orig, *args, **kwargs):
    common, inc, remoteheads = orig(*args, **kwargs)
    return common, True, remoteheads


def _push(orig, ui, repo, dest=None, *args, **opts):
    opts = pycompat.byteskwargs(opts)
    bookmark = opts.get(b'bookmark')
    # we only support pushing one infinitepush bookmark at once
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = b''

    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}

    with ui.configoverride(overrides, b'infinitepush'):
        scratchpush = opts.get(b'bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # a bundle2 can be sent back after push (for example, a bundle2
            # containing a `pushkey` part to update bookmarks)
            ui.setconfig(experimental, b'bundle2.pushback', True)

        if scratchpush:
            # this is an infinitepush; we don't want the bookmark to be
            # applied, rather it should be stored in the bundlestore
            opts[b'bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = extensions.wrapfunction(
                exchange, b'_localphasemove', _phasemove
            )
        # Copy-paste from the `push` command
        path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
        if not path:
            raise error.Abort(
                _(b'default repository not configured!'),
                hint=_(b"see 'hg help config.paths'"),
            )
        destpath = path.pushloc or path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the push and restore them
        # after.
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
        result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                other = hg.peer(repo, opts, destpath)
                fetchedbookmarks = other.listkeyspatterns(
                    b'bookmarks', patterns=[bookmark]
                )
                remotescratchbookmarks.update(fetchedbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result


def _deleteinfinitepushbookmarks(ui, repo, path, names):
    """Prune remote names by removing the bookmarks we don't want anymore,
    then writing the result back to disk
    """
    remotenamesext = extensions.find(b'remotenames')

    # remotename format is:
    # (node, nametype ("branches" or "bookmarks"), remote, name)
    nametype_idx = 1
    remote_idx = 2
    name_idx = 3
    remotenames = [
        remotename
        for remotename in remotenamesext.readremotenames(repo)
        if remotename[remote_idx] == path
    ]
    remote_bm_names = [
        remotename[name_idx]
        for remotename in remotenames
        if remotename[nametype_idx] == b"bookmarks"
    ]

    for name in names:
        if name not in remote_bm_names:
            raise error.Abort(
                _(
                    b"infinitepush bookmark '{}' does not exist "
                    b"in path '{}'"
                ).format(name, path)
            )

    bookmarks = {}
    branches = collections.defaultdict(list)
    for node, nametype, remote, name in remotenames:
        if nametype == b"bookmarks" and name not in names:
            bookmarks[name] = node
        elif nametype == b"branches":
            # saveremotenames wants binary nodes for branches
            branches[name].append(bin(node))

    remotenamesext.saveremotenames(repo, path, branches, bookmarks)


def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent commits from being marked public

    Since these are going to a scratch branch, they aren't really being
    published."""

    if phase != phases.public:
        orig(pushop, nodes, phase)


@exchange.b2partsgenerator(scratchbranchparttype)
def partgen(pushop, bundler):
    bookmark = pushop.ui.config(experimental, configbookmark)
    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
    if b'changesets' in pushop.stepsdone or not scratchpush:
        return

    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    pushop.stepsdone.add(b'changesets')
    if not pushop.outgoing.missing:
        pushop.ui.status(_(b'no changes found\n'))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This lets it switch the part processing to our
    # infinitepush code path.
    bundler.addparam(b"infinitepush", b"True")

    scratchparts = bundleparts.getscratchbranchparts(
        pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
    )

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply


bundle2.capabilities[bundleparts.scratchbranchparttype] = ()


def _getrevs(bundle, oldnode, force, bookmark):
    b'extracts and validates the revs to be imported'
    revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]

    # new bookmark
    if oldnode is None:
        return revs

    # Fast forward update
    if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
        return revs

    return revs


@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    start = time.time()
    logger(service, eventtype=b'start', **kwargs)
    try:
        yield
        logger(
            service,
            eventtype=b'success',
            elapsedms=(time.time() - start) * 1000,
            **kwargs
        )
    except Exception as e:
        logger(
            service,
            eventtype=b'failure',
            elapsedms=(time.time() - start) * 1000,
            errormsg=str(e),
            **kwargs
        )
        raise


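# Illustrative usage (taken from storebundle further down): the context
# manager brackets a service call with start/success/failure log events,
# including elapsed milliseconds, e.g.
#
#     with logservicecall(log, b'bundlestore', bundlesize=len(bundledata)):
#         key = store.write(bundledata)

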
def _getorcreateinfinitepushlogger(op):
    logger = op.records[b'infinitepushlogger']
    if not logger:
        ui = op.repo.ui
        try:
            username = procutil.getuser()
        except Exception:
            username = b'unknown'
        # Generate a random request id to be able to find all logged entries
        # for the same request. Since the requestid is pseudo-randomly
        # generated it may not be unique, but we assume that
        # (hostname, username, requestid) is unique.
        random.seed()
        requestid = random.randint(0, 2000000000)
        hostname = socket.gethostname()
        logger = functools.partial(
            ui.log,
            b'infinitepush',
            user=username,
            requestid=requestid,
            hostname=hostname,
            reponame=ui.config(b'infinitepush', b'reponame'),
        )
        op.records.add(b'infinitepushlogger', logger)
    else:
        logger = logger[0]
    return logger


def storetobundlestore(orig, repo, op, unbundler):
    """stores the incoming bundle coming from a push command in the
    bundlestore instead of applying it to the revlogs"""

    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # process each part and store it in the bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in part.params.iteritems():
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in (b'pushkey', b'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart(b'reply:%s' % part.type)
                        rpart.addparam(
                            b'in-reply-to', b'%d' % part.id, mandatory=False
                        )
                        rpart.addparam(b'return', b'1', mandatory=False)

            op.records.add(part.type, {b'return': 1,})
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, r'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass


def processparts(orig, repo, op, unbundler):

    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == b'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to the bundle store
    if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    if unbundler.params.get(b'infinitepush') != b'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get(b'cgversion', b'01')
                bundlepart = bundle2.bundlepart(
                    b'changegroup', data=part.read()
                )
                bundlepart.addparam(b'version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(
                        scratchbranchparttype + b'_skippushkey', True
                    )
                    op.records.add(
                        scratchbranchparttype + b'_skipphaseheads', True
                    )
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == b'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart(b'reply:pushkey')
                            rpart.addparam(
                                b'in-reply-to', str(part.id), mandatory=False
                            )
                            rpart.addparam(b'return', b'1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

            if handleallparts:
                op.records.add(part.type, {b'return': 1,})
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = pycompat.mkstemp()
        try:
            try:
                fp = os.fdopen(fd, r'wb')
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass


def storebundle(op, params, bundlefile):
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype=b'start')
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    op.records.add(scratchbranchparttype + b'_skippushkey', True)

    bundle = None
    try:  # guards bundle
        bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = hg.repository(op.repo.ui, bundlepath)

        bookmark = params.get(b'bookmark')
        bookprevnode = params.get(b'bookprevnode', b'')
        force = params.get(b'force')

        if bookmark:
            oldnode = index.getnode(bookmark)
        else:
            oldnode = None
        bundleheads = bundle.revs(b'heads(bundle())')
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(
                _(b'cannot push more than one head to a scratch branch')
            )

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = b's' if len(revs) > 1 else b''
        op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b"    %s  %s\n" % (revs[i], firstline))

        if len(revs) > maxoutput + 1:
            op.repo.ui.warn(b"    ...\n")
            firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b"    %s  %s\n" % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            with open(bundlefile, b'rb') as f:
                bundledata = f.read()
                with logservicecall(
                    log, b'bundlestore', bundlesize=len(bundledata)
                ):
                    bundlesizelimit = 100 * 1024 * 1024  # 100 MB
                    if len(bundledata) > bundlesizelimit:
                        error_msg = (
                            b'bundle is too big: %d bytes. '
                            + b'max allowed size is 100 MB'
                        )
                        raise error.Abort(error_msg % (len(bundledata),))
                    key = store.write(bundledata)

        with logservicecall(log, b'index', newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
                _maybeaddpushbackpart(
                    op, bookmark, bookmarknode, bookprevnode, params
                )
        log(
            scratchbranchparttype,
            eventtype=b'success',
            elapsedms=(time.time() - parthandlerstart) * 1000,
        )

    except Exception as e:
        log(
            scratchbranchparttype,
            eventtype=b'failure',
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=str(e),
        )
        raise
    finally:
        if bundle:
            bundle.close()


@bundle2.parthandler(
    scratchbranchparttype,
    (
        b'bookmark',
        b'bookprevnode',
        b'force',
        b'pushbackbookmarks',
        b'cgversion',
    ),
)
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''

    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get(b'cgversion', b'01')
    cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
    cgpart.addparam(b'version', cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, r'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    return 1


def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
    if params.get(b'pushbackbookmarks'):
        if op.reply and b'pushback' in op.reply.capabilities:
            params = {
                b'namespace': b'bookmarks',
                b'key': bookmark,
                b'new': newnode,
                b'old': oldnode,
            }
            op.reply.newpart(b'pushkey', mandatoryparams=params.iteritems())


def bundle2pushkey(orig, op, part):
    '''Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if the flag is
    set. It's set if an infinitepush push is happening.
    '''
    if op.records[scratchbranchparttype + b'_skippushkey']:
        if op.reply is not None:
            rpart = op.reply.newpart(b'reply:pushkey')
            rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
            rpart.addparam(b'return', b'1', mandatory=False)
        return 1

    return orig(op, part)


def bundle2handlephases(orig, op, part):
    '''Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if the flag is
    set. It's set if an infinitepush push is happening.
    '''

    if op.records[scratchbranchparttype + b'_skipphaseheads']:
        return

    return orig(op, part)


def _asyncsavemetadata(root, nodes):
    '''starts a separate process that fills metadata for the nodes

    This function creates a separate process and doesn't wait for its
    completion. This was done to avoid slowing down pushes.
    '''

    maxnodes = 50
    if len(nodes) > maxnodes:
        return
    nodesargs = []
    for node in nodes:
        nodesargs.append(b'--node')
        nodesargs.append(node)
    with open(os.devnull, b'w+b') as devnull:
        cmdline = [
            util.hgexecutable(),
            b'debugfillinfinitepushmetadata',
            b'-R',
            root,
        ] + nodesargs
        # The process will run in the background. We don't care about the
        # return code.
        subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, cmdline),
            close_fds=True,
            shell=False,
            stdin=devnull,
            stdout=devnull,
            stderr=devnull,
        )
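
# For clarity (sketch of the resulting invocation): the Popen call above
# spawns, detached from the push and with all standard streams pointed at
# os.devnull, a background command roughly equivalent to
#
#     hg debugfillinfinitepushmetadata -R <root> --node <hex> [--node <hex> ...]
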
@@ -1,886 +1,887
# keyword.py - $Keyword$ expansion for Mercurial
#
# Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# $Id$
#
# Keyword expansion hack against the grain of a Distributed SCM
#
# There are many good reasons why this is not needed in a distributed
# SCM, still it may be useful in very small projects based on single
# files (like LaTeX packages), that are mostly addressed to an
# audience not running a version control system.
#
# For in-depth discussion refer to
# <https://mercurial-scm.org/wiki/KeywordPlan>.
#
# Keyword expansion is based on Mercurial's changeset template mappings.
#
# Binary files are not touched.
#
# Files to act upon/ignore are specified in the [keyword] section.
# Customized keyword template mappings go in the [keywordmaps] section.
#
# Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.

'''expand keywords in tracked files

This extension expands RCS/CVS-like or self-customized $Keywords$ in
tracked text files selected by your configuration.

Keywords are only expanded in local repositories and not stored in the
change history. The mechanism can be regarded as a convenience for the
current user or for archive distribution.

Keywords expand to the changeset data pertaining to the latest change
relative to the working directory parent of each file.

Configuration is done in the [keyword], [keywordset] and [keywordmaps]
sections of hgrc files.

Example::

    [keyword]
    # expand keywords in every python file except those matching "x*"
    **.py =
    x* = ignore

    [keywordset]
    # prefer svn- over cvs-like default keywordmaps
    svn = True

.. note::

   The more specific your filename patterns, the less speed you lose in
   huge repositories.

For [keywordmaps] template mapping and expansion demonstration and
control run :hg:`kwdemo`. See :hg:`help templates` for a list of
available templates and filters.

Three additional date template filters are provided:

:``utcdate``:    "2006/09/18 15:13:13"
:``svnutcdate``: "2006-09-18 15:13:13Z"
:``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"

The default template mappings (view with :hg:`kwdemo -d`) can be
replaced with customized keywords and templates. Again, run
:hg:`kwdemo` to control the results of your configuration changes.

Before changing/disabling active keywords, you must run :hg:`kwshrink`
to avoid storing expanded keywords in the change history.

To force expansion after enabling it, or a configuration change, run
:hg:`kwexpand`.

Expansions spanning more than one line and incremental expansions,
like CVS' $Log$, are not supported. A keyword template map "Log =
{desc}" expands to the first line of the changeset description.
'''
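
# Illustrative example (assumed to mirror the default Id mapping defined in
# _defaultkwmaps below): a customized template map in hgrc could look like
#
#     [keywordmaps]
#     Id = {file|basename},v {node|short} {date|utcdate} {author|user}
#
# which makes $Id$ expand much like its CVS counterpart.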


from __future__ import absolute_import

import os
import re
import weakref

from mercurial.i18n import _
from mercurial.pycompat import getattr
from mercurial.hgweb import webcommands

from mercurial import (
    cmdutil,
    context,
    dispatch,
    error,
    extensions,
    filelog,
    localrepo,
    logcmdutil,
    match,
    patch,
    pathutil,
    pycompat,
    registrar,
    scmutil,
    templatefilters,
    templateutil,
    util,
)
from mercurial.utils import (
    dateutil,
    stringutil,
)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# hg commands that do not act on keywords
nokwcommands = (
    b'add addremove annotate bundle export grep incoming init log'
    b' outgoing push tip verify convert email glog'
)

# webcommands that do not act on keywords
nokwwebcommands = b'annotate changeset rev filediff diff comparison'

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = (
    b'merge kwexpand kwshrink record qrecord resolve transplant'
    b' unshelve rebase graft backout histedit fetch'
)

# names of extensions using dorecord
recordextensions = b'record'

colortable = {
    b'kwfiles.enabled': b'green bold',
    b'kwfiles.deleted': b'cyan bold underline',
    b'kwfiles.enabledunknown': b'green',
    b'kwfiles.ignored': b'bold',
    b'kwfiles.ignoredunknown': b'none',
}

templatefilter = registrar.templatefilter()

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'keywordset', b'svn', default=False,
)
162 # date like in cvs' $Date
163 # date like in cvs' $Date
163 @templatefilter(b'utcdate', intype=templateutil.date)
164 @templatefilter(b'utcdate', intype=templateutil.date)
164 def utcdate(date):
165 def utcdate(date):
165 '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
166 '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
166 '''
167 '''
167 dateformat = b'%Y/%m/%d %H:%M:%S'
168 dateformat = b'%Y/%m/%d %H:%M:%S'
168 return dateutil.datestr((date[0], 0), dateformat)
169 return dateutil.datestr((date[0], 0), dateformat)
169
170
170
171
171 # date like in svn's $Date
172 # date like in svn's $Date
172 @templatefilter(b'svnisodate', intype=templateutil.date)
173 @templatefilter(b'svnisodate', intype=templateutil.date)
173 def svnisodate(date):
174 def svnisodate(date):
174 '''Date. Returns a date in this format: "2009-08-18 13:00:13
175 '''Date. Returns a date in this format: "2009-08-18 13:00:13
175 +0200 (Tue, 18 Aug 2009)".
176 +0200 (Tue, 18 Aug 2009)".
176 '''
177 '''
177 return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
178 return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
178
179
179
180
180 # date like in svn's $Id
181 # date like in svn's $Id
181 @templatefilter(b'svnutcdate', intype=templateutil.date)
182 @templatefilter(b'svnutcdate', intype=templateutil.date)
182 def svnutcdate(date):
183 def svnutcdate(date):
183 '''Date. Returns a UTC-date in this format: "2009-08-18
184 '''Date. Returns a UTC-date in this format: "2009-08-18
184 11:00:13Z".
185 11:00:13Z".
185 '''
186 '''
186 dateformat = b'%Y-%m-%d %H:%M:%SZ'
187 dateformat = b'%Y-%m-%d %H:%M:%SZ'
187 return dateutil.datestr((date[0], 0), dateformat)
188 return dateutil.datestr((date[0], 0), dateformat)
188
189
189
190
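A quick, self-contained way to sanity-check the three output formats above — a sketch using the standard-library datetime rather than dateutil.datestr; the %1%2 timezone fields of svnisodate are approximated here with %z, which prints +0000 for this UTC example:

    from datetime import datetime, timezone

    ts = 1250593213  # arbitrary epoch seconds
    utc = datetime.fromtimestamp(ts, timezone.utc)
    print(utc.strftime('%Y/%m/%d %H:%M:%S'))                    # utcdate style
    print(utc.strftime('%Y-%m-%d %H:%M:%S %z (%a, %d %b %Y)'))  # svnisodate style
    print(utc.strftime('%Y-%m-%d %H:%M:%SZ'))                   # svnutcdate style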
190 # make keyword tools accessible
191 # make keyword tools accessible
191 kwtools = {b'hgcmd': b''}
192 kwtools = {b'hgcmd': b''}
192
193
193
194
194 def _defaultkwmaps(ui):
195 def _defaultkwmaps(ui):
195 '''Returns default keywordmaps according to keywordset configuration.'''
196 '''Returns default keywordmaps according to keywordset configuration.'''
196 templates = {
197 templates = {
197 b'Revision': b'{node|short}',
198 b'Revision': b'{node|short}',
198 b'Author': b'{author|user}',
199 b'Author': b'{author|user}',
199 }
200 }
200 kwsets = (
201 kwsets = (
201 {
202 {
202 b'Date': b'{date|utcdate}',
203 b'Date': b'{date|utcdate}',
203 b'RCSfile': b'{file|basename},v',
204 b'RCSfile': b'{file|basename},v',
204 b'RCSFile': b'{file|basename},v', # kept for backwards compatibility
205 b'RCSFile': b'{file|basename},v', # kept for backwards compatibility
205 # with hg-keyword
206 # with hg-keyword
206 b'Source': b'{root}/{file},v',
207 b'Source': b'{root}/{file},v',
207 b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
208 b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
208 b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
209 b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
209 },
210 },
210 {
211 {
211 b'Date': b'{date|svnisodate}',
212 b'Date': b'{date|svnisodate}',
212 b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
213 b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
213 b'LastChangedRevision': b'{node|short}',
214 b'LastChangedRevision': b'{node|short}',
214 b'LastChangedBy': b'{author|user}',
215 b'LastChangedBy': b'{author|user}',
215 b'LastChangedDate': b'{date|svnisodate}',
216 b'LastChangedDate': b'{date|svnisodate}',
216 },
217 },
217 )
218 )
218 templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
219 templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
219 return templates
220 return templates
220
221
221
222
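Note that the configbool result doubles as a tuple index above: False selects the CVS-style set, True the SVN-style one. A minimal standalone illustration of the same idiom:

    templates = {'Revision': '{node|short}'}
    kwsets = ({'Date': '{date|utcdate}'}, {'Date': '{date|svnisodate}'})
    svn = True  # stand-in for ui.configbool(b'keywordset', b'svn')
    templates.update(kwsets[svn])  # bool indexes the tuple: False -> 0, True -> 1
    print(templates['Date'])       # {date|svnisodate}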
222 def _shrinktext(text, subfunc):
223 def _shrinktext(text, subfunc):
223 '''Helper for keyword expansion removal in text.
224 '''Helper for keyword expansion removal in text.
224 Depending on subfunc also returns number of substitutions.'''
225 Depending on subfunc also returns number of substitutions.'''
225 return subfunc(br'$\1$', text)
226 return subfunc(br'$\1$', text)
226
227
227
228
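The subfunc argument is either pattern.sub (returns just the new text) or pattern.subn (returns a (text, count) tuple), which is why some callers of _shrinktext unpack two values. A standalone check with the expanded-keyword pattern used later in this file:

    import re

    rekwexp = re.compile(r'\$(Id): [^$\n\r]*? \$')
    text = '$Id: demo.txt,v deadbeef 2009/08/18 11:00:13 alice $'
    print(rekwexp.sub(r'$\1$', text))   # $Id$
    print(rekwexp.subn(r'$\1$', text))  # ('$Id$', 1)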
228 def _preselect(wstatus, changed):
229 def _preselect(wstatus, changed):
229 '''Retrieves modified and added files from a working directory state
230 '''Retrieves modified and added files from a working directory state
230 and returns the subset of each contained in given changed files
231 and returns the subset of each contained in given changed files
231 retrieved from a change context.'''
232 retrieved from a change context.'''
232 modified = [f for f in wstatus.modified if f in changed]
233 modified = [f for f in wstatus.modified if f in changed]
233 added = [f for f in wstatus.added if f in changed]
234 added = [f for f in wstatus.added if f in changed]
234 return modified, added
235 return modified, added
235
236
236
237
237 class kwtemplater(object):
238 class kwtemplater(object):
238 '''
239 '''
239 Sets up keyword templates, corresponding keyword regex, and
240 Sets up keyword templates, corresponding keyword regex, and
240 provides keyword substitution functions.
241 provides keyword substitution functions.
241 '''
242 '''
242
243
243 def __init__(self, ui, repo, inc, exc):
244 def __init__(self, ui, repo, inc, exc):
244 self.ui = ui
245 self.ui = ui
245 self._repo = weakref.ref(repo)
246 self._repo = weakref.ref(repo)
246 self.match = match.match(repo.root, b'', [], inc, exc)
247 self.match = match.match(repo.root, b'', [], inc, exc)
247 self.restrict = kwtools[b'hgcmd'] in restricted.split()
248 self.restrict = kwtools[b'hgcmd'] in restricted.split()
248 self.postcommit = False
249 self.postcommit = False
249
250
250 kwmaps = self.ui.configitems(b'keywordmaps')
251 kwmaps = self.ui.configitems(b'keywordmaps')
251 if kwmaps: # override default templates
252 if kwmaps: # override default templates
252 self.templates = dict(kwmaps)
253 self.templates = dict(kwmaps)
253 else:
254 else:
254 self.templates = _defaultkwmaps(self.ui)
255 self.templates = _defaultkwmaps(self.ui)
255
256
256 @property
257 @property
257 def repo(self):
258 def repo(self):
258 return self._repo()
259 return self._repo()
259
260
260 @util.propertycache
261 @util.propertycache
261 def escape(self):
262 def escape(self):
262 '''Returns bar-separated and escaped keywords.'''
263 '''Returns bar-separated and escaped keywords.'''
263 return b'|'.join(map(stringutil.reescape, self.templates.keys()))
264 return b'|'.join(map(stringutil.reescape, self.templates.keys()))
264
265
265 @util.propertycache
266 @util.propertycache
266 def rekw(self):
267 def rekw(self):
267 '''Returns regex for unexpanded keywords.'''
268 '''Returns regex for unexpanded keywords.'''
268 return re.compile(br'\$(%s)\$' % self.escape)
269 return re.compile(br'\$(%s)\$' % self.escape)
269
270
270 @util.propertycache
271 @util.propertycache
271 def rekwexp(self):
272 def rekwexp(self):
272 '''Returns regex for expanded keywords.'''
273 '''Returns regex for expanded keywords.'''
273 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
274 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
274
275
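With the default maps, escape is just the re-escaped keyword names joined by bars, so rekw matches only the unexpanded $Keyword$ form. A rough standalone equivalent (stringutil.reescape is byte-oriented; plain re.escape stands in here):

    import re

    templates = {'Id': '...', 'Header': '...'}
    escaped = '|'.join(map(re.escape, templates))
    rekw = re.compile(r'\$(%s)\$' % escaped)
    print(rekw.findall('$Id$ and $Header$ but not $Date$'))  # ['Id', 'Header']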
275 def substitute(self, data, path, ctx, subfunc):
276 def substitute(self, data, path, ctx, subfunc):
276 '''Replaces keywords in data with expanded template.'''
277 '''Replaces keywords in data with expanded template.'''
277
278
278 def kwsub(mobj):
279 def kwsub(mobj):
279 kw = mobj.group(1)
280 kw = mobj.group(1)
280 ct = logcmdutil.maketemplater(
281 ct = logcmdutil.maketemplater(
281 self.ui, self.repo, self.templates[kw]
282 self.ui, self.repo, self.templates[kw]
282 )
283 )
283 self.ui.pushbuffer()
284 self.ui.pushbuffer()
284 ct.show(ctx, root=self.repo.root, file=path)
285 ct.show(ctx, root=self.repo.root, file=path)
285 ekw = templatefilters.firstline(self.ui.popbuffer())
286 ekw = templatefilters.firstline(self.ui.popbuffer())
286 return b'$%s: %s $' % (kw, ekw)
287 return b'$%s: %s $' % (kw, ekw)
287
288
288 return subfunc(kwsub, data)
289 return subfunc(kwsub, data)
289
290
290 def linkctx(self, path, fileid):
291 def linkctx(self, path, fileid):
291 '''Similar to filelog.linkrev, but returns a changectx.'''
292 '''Similar to filelog.linkrev, but returns a changectx.'''
292 return self.repo.filectx(path, fileid=fileid).changectx()
293 return self.repo.filectx(path, fileid=fileid).changectx()
293
294
294 def expand(self, path, node, data):
295 def expand(self, path, node, data):
295 '''Returns data with keywords expanded.'''
296 '''Returns data with keywords expanded.'''
296 if (
297 if (
297 not self.restrict
298 not self.restrict
298 and self.match(path)
299 and self.match(path)
299 and not stringutil.binary(data)
300 and not stringutil.binary(data)
300 ):
301 ):
301 ctx = self.linkctx(path, node)
302 ctx = self.linkctx(path, node)
302 return self.substitute(data, path, ctx, self.rekw.sub)
303 return self.substitute(data, path, ctx, self.rekw.sub)
303 return data
304 return data
304
305
305 def iskwfile(self, cand, ctx):
306 def iskwfile(self, cand, ctx):
306 '''Returns subset of candidates which are configured for keyword
307 '''Returns subset of candidates which are configured for keyword
307 expansion but are not symbolic links.'''
308 expansion but are not symbolic links.'''
308 return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]
309 return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]
309
310
310 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
311 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
311 '''Overwrites selected files expanding/shrinking keywords.'''
312 '''Overwrites selected files expanding/shrinking keywords.'''
312 if self.restrict or lookup or self.postcommit: # exclude kw_copy
313 if self.restrict or lookup or self.postcommit: # exclude kw_copy
313 candidates = self.iskwfile(candidates, ctx)
314 candidates = self.iskwfile(candidates, ctx)
314 if not candidates:
315 if not candidates:
315 return
316 return
316 kwcmd = self.restrict and lookup # kwexpand/kwshrink
317 kwcmd = self.restrict and lookup # kwexpand/kwshrink
317 if self.restrict or expand and lookup:
318 if self.restrict or expand and lookup:
318 mf = ctx.manifest()
319 mf = ctx.manifest()
319 if self.restrict or rekw:
320 if self.restrict or rekw:
320 re_kw = self.rekw
321 re_kw = self.rekw
321 else:
322 else:
322 re_kw = self.rekwexp
323 re_kw = self.rekwexp
323 if expand:
324 if expand:
324 msg = _(b'overwriting %s expanding keywords\n')
325 msg = _(b'overwriting %s expanding keywords\n')
325 else:
326 else:
326 msg = _(b'overwriting %s shrinking keywords\n')
327 msg = _(b'overwriting %s shrinking keywords\n')
327 for f in candidates:
328 for f in candidates:
328 if self.restrict:
329 if self.restrict:
329 data = self.repo.file(f).read(mf[f])
330 data = self.repo.file(f).read(mf[f])
330 else:
331 else:
331 data = self.repo.wread(f)
332 data = self.repo.wread(f)
332 if stringutil.binary(data):
333 if stringutil.binary(data):
333 continue
334 continue
334 if expand:
335 if expand:
335 parents = ctx.parents()
336 parents = ctx.parents()
336 if lookup:
337 if lookup:
337 ctx = self.linkctx(f, mf[f])
338 ctx = self.linkctx(f, mf[f])
338 elif self.restrict and len(parents) > 1:
339 elif self.restrict and len(parents) > 1:
339 # merge commit
340 # merge commit
340 # in case of conflict f is in modified state during
341 # in case of conflict f is in modified state during
341 # merge, even if f does not differ from f in parent
342 # merge, even if f does not differ from f in parent
342 for p in parents:
343 for p in parents:
343 if f in p and not p[f].cmp(ctx[f]):
344 if f in p and not p[f].cmp(ctx[f]):
344 ctx = p[f].changectx()
345 ctx = p[f].changectx()
345 break
346 break
346 data, found = self.substitute(data, f, ctx, re_kw.subn)
347 data, found = self.substitute(data, f, ctx, re_kw.subn)
347 elif self.restrict:
348 elif self.restrict:
348 found = re_kw.search(data)
349 found = re_kw.search(data)
349 else:
350 else:
350 data, found = _shrinktext(data, re_kw.subn)
351 data, found = _shrinktext(data, re_kw.subn)
351 if found:
352 if found:
352 self.ui.note(msg % f)
353 self.ui.note(msg % f)
353 fp = self.repo.wvfs(f, b"wb", atomictemp=True)
354 fp = self.repo.wvfs(f, b"wb", atomictemp=True)
354 fp.write(data)
355 fp.write(data)
355 fp.close()
356 fp.close()
356 if kwcmd:
357 if kwcmd:
357 self.repo.dirstate.normal(f)
358 self.repo.dirstate.normal(f)
358 elif self.postcommit:
359 elif self.postcommit:
359 self.repo.dirstate.normallookup(f)
360 self.repo.dirstate.normallookup(f)
360
361
361 def shrink(self, fname, text):
362 def shrink(self, fname, text):
362 '''Returns text with all keyword substitutions removed.'''
363 '''Returns text with all keyword substitutions removed.'''
363 if self.match(fname) and not stringutil.binary(text):
364 if self.match(fname) and not stringutil.binary(text):
364 return _shrinktext(text, self.rekwexp.sub)
365 return _shrinktext(text, self.rekwexp.sub)
365 return text
366 return text
366
367
367 def shrinklines(self, fname, lines):
368 def shrinklines(self, fname, lines):
368 '''Returns lines with keyword substitutions removed.'''
369 '''Returns lines with keyword substitutions removed.'''
369 if self.match(fname):
370 if self.match(fname):
370 text = b''.join(lines)
371 text = b''.join(lines)
371 if not stringutil.binary(text):
372 if not stringutil.binary(text):
372 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
373 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
373 return lines
374 return lines
374
375
375 def wread(self, fname, data):
376 def wread(self, fname, data):
376 '''If in restricted mode returns data read from wdir with
377 '''If in restricted mode returns data read from wdir with
377 keyword substitutions removed.'''
378 keyword substitutions removed.'''
378 if self.restrict:
379 if self.restrict:
379 return self.shrink(fname, data)
380 return self.shrink(fname, data)
380 return data
381 return data
381
382
382
383
383 class kwfilelog(filelog.filelog):
384 class kwfilelog(filelog.filelog):
384 '''
385 '''
385 Subclass of filelog to hook into its read, add, cmp methods.
386 Subclass of filelog to hook into its read, add, cmp methods.
386 Keywords are "stored" unexpanded, and processed on reading.
387 Keywords are "stored" unexpanded, and processed on reading.
387 '''
388 '''
388
389
389 def __init__(self, opener, kwt, path):
390 def __init__(self, opener, kwt, path):
390 super(kwfilelog, self).__init__(opener, path)
391 super(kwfilelog, self).__init__(opener, path)
391 self.kwt = kwt
392 self.kwt = kwt
392 self.path = path
393 self.path = path
393
394
394 def read(self, node):
395 def read(self, node):
395 '''Expands keywords when reading filelog.'''
396 '''Expands keywords when reading filelog.'''
396 data = super(kwfilelog, self).read(node)
397 data = super(kwfilelog, self).read(node)
397 if self.renamed(node):
398 if self.renamed(node):
398 return data
399 return data
399 return self.kwt.expand(self.path, node, data)
400 return self.kwt.expand(self.path, node, data)
400
401
401 def add(self, text, meta, tr, link, p1=None, p2=None):
402 def add(self, text, meta, tr, link, p1=None, p2=None):
402 '''Removes keyword substitutions when adding to filelog.'''
403 '''Removes keyword substitutions when adding to filelog.'''
403 text = self.kwt.shrink(self.path, text)
404 text = self.kwt.shrink(self.path, text)
404 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
405 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
405
406
406 def cmp(self, node, text):
407 def cmp(self, node, text):
407 '''Removes keyword substitutions for comparison.'''
408 '''Removes keyword substitutions for comparison.'''
408 text = self.kwt.shrink(self.path, text)
409 text = self.kwt.shrink(self.path, text)
409 return super(kwfilelog, self).cmp(node, text)
410 return super(kwfilelog, self).cmp(node, text)
410
411
411
412
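The invariant this subclass maintains: text enters the filelog shrunk and is expanded again on read, so stored history never contains expansions. A toy round trip with plain regexes (not the real filelog API; the expansion string is hardcoded where read() would render a template):

    import re

    rekw = re.compile(r'\$(Id)\$')
    rekwexp = re.compile(r'\$(Id): [^$\n\r]*? \$')

    working = '$Id: f,v abc123 alice $'
    stored = rekwexp.sub(r'$\1$', working)                  # what add() writes: $Id$
    expanded = rekw.sub('$Id: f,v abc123 alice $', stored)  # what read() would yield
    assert expanded == working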
412 def _status(ui, repo, wctx, kwt, *pats, **opts):
413 def _status(ui, repo, wctx, kwt, *pats, **opts):
413 '''Bails out if [keyword] configuration is not active.
414 '''Bails out if [keyword] configuration is not active.
414 Returns status of working directory.'''
415 Returns status of working directory.'''
415 if kwt:
416 if kwt:
416 opts = pycompat.byteskwargs(opts)
417 opts = pycompat.byteskwargs(opts)
417 return repo.status(
418 return repo.status(
418 match=scmutil.match(wctx, pats, opts),
419 match=scmutil.match(wctx, pats, opts),
419 clean=True,
420 clean=True,
420 unknown=opts.get(b'unknown') or opts.get(b'all'),
421 unknown=opts.get(b'unknown') or opts.get(b'all'),
421 )
422 )
422 if ui.configitems(b'keyword'):
423 if ui.configitems(b'keyword'):
423 raise error.Abort(_(b'[keyword] patterns cannot match'))
424 raise error.Abort(_(b'[keyword] patterns cannot match'))
424 raise error.Abort(_(b'no [keyword] patterns configured'))
425 raise error.Abort(_(b'no [keyword] patterns configured'))
425
426
426
427
427 def _kwfwrite(ui, repo, expand, *pats, **opts):
428 def _kwfwrite(ui, repo, expand, *pats, **opts):
428 '''Selects files and passes them to kwtemplater.overwrite.'''
429 '''Selects files and passes them to kwtemplater.overwrite.'''
429 wctx = repo[None]
430 wctx = repo[None]
430 if len(wctx.parents()) > 1:
431 if len(wctx.parents()) > 1:
431 raise error.Abort(_(b'outstanding uncommitted merge'))
432 raise error.Abort(_(b'outstanding uncommitted merge'))
432 kwt = getattr(repo, '_keywordkwt', None)
433 kwt = getattr(repo, '_keywordkwt', None)
433 with repo.wlock():
434 with repo.wlock():
434 status = _status(ui, repo, wctx, kwt, *pats, **opts)
435 status = _status(ui, repo, wctx, kwt, *pats, **opts)
435 if status.modified or status.added or status.removed or status.deleted:
436 if status.modified or status.added or status.removed or status.deleted:
436 raise error.Abort(_(b'outstanding uncommitted changes'))
437 raise error.Abort(_(b'outstanding uncommitted changes'))
437 kwt.overwrite(wctx, status.clean, True, expand)
438 kwt.overwrite(wctx, status.clean, True, expand)
438
439
439
440
440 @command(
441 @command(
441 b'kwdemo',
442 b'kwdemo',
442 [
443 [
443 (b'd', b'default', None, _(b'show default keyword template maps')),
444 (b'd', b'default', None, _(b'show default keyword template maps')),
444 (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
445 (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
445 ],
446 ],
446 _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
447 _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
447 optionalrepo=True,
448 optionalrepo=True,
448 )
449 )
449 def demo(ui, repo, *args, **opts):
450 def demo(ui, repo, *args, **opts):
450 '''print [keywordmaps] configuration and an expansion example
451 '''print [keywordmaps] configuration and an expansion example
451
452
452 Show current, custom, or default keyword template maps and their
453 Show current, custom, or default keyword template maps and their
453 expansions.
454 expansions.
454
455
455 Extend the current configuration by specifying maps as arguments
456 Extend the current configuration by specifying maps as arguments
456 and using -f/--rcfile to source an external hgrc file.
457 and using -f/--rcfile to source an external hgrc file.
457
458
458 Use -d/--default to disable current configuration.
459 Use -d/--default to disable current configuration.
459
460
460 See :hg:`help templates` for information on templates and filters.
461 See :hg:`help templates` for information on templates and filters.
461 '''
462 '''
462
463
463 def demoitems(section, items):
464 def demoitems(section, items):
464 ui.write(b'[%s]\n' % section)
465 ui.write(b'[%s]\n' % section)
465 for k, v in sorted(items):
466 for k, v in sorted(items):
466 if isinstance(v, bool):
467 if isinstance(v, bool):
467 v = stringutil.pprint(v)
468 v = stringutil.pprint(v)
468 ui.write(b'%s = %s\n' % (k, v))
469 ui.write(b'%s = %s\n' % (k, v))
469
470
470 fn = b'demo.txt'
471 fn = b'demo.txt'
471 tmpdir = pycompat.mkdtemp(b'', b'kwdemo.')
472 tmpdir = pycompat.mkdtemp(b'', b'kwdemo.')
472 ui.note(_(b'creating temporary repository at %s\n') % tmpdir)
473 ui.note(_(b'creating temporary repository at %s\n') % tmpdir)
473 if repo is None:
474 if repo is None:
474 baseui = ui
475 baseui = ui
475 else:
476 else:
476 baseui = repo.baseui
477 baseui = repo.baseui
477 repo = localrepo.instance(baseui, tmpdir, create=True)
478 repo = localrepo.instance(baseui, tmpdir, create=True)
478 ui.setconfig(b'keyword', fn, b'', b'keyword')
479 ui.setconfig(b'keyword', fn, b'', b'keyword')
479 svn = ui.configbool(b'keywordset', b'svn')
480 svn = ui.configbool(b'keywordset', b'svn')
480 # explicitly set keywordset for demo output
481 # explicitly set keywordset for demo output
481 ui.setconfig(b'keywordset', b'svn', svn, b'keyword')
482 ui.setconfig(b'keywordset', b'svn', svn, b'keyword')
482
483
483 uikwmaps = ui.configitems(b'keywordmaps')
484 uikwmaps = ui.configitems(b'keywordmaps')
484 if args or opts.get(r'rcfile'):
485 if args or opts.get(r'rcfile'):
485 ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
486 ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
486 if uikwmaps:
487 if uikwmaps:
487 ui.status(_(b'\textending current template maps\n'))
488 ui.status(_(b'\textending current template maps\n'))
488 if opts.get(r'default') or not uikwmaps:
489 if opts.get(r'default') or not uikwmaps:
489 if svn:
490 if svn:
490 ui.status(_(b'\toverriding default svn keywordset\n'))
491 ui.status(_(b'\toverriding default svn keywordset\n'))
491 else:
492 else:
492 ui.status(_(b'\toverriding default cvs keywordset\n'))
493 ui.status(_(b'\toverriding default cvs keywordset\n'))
493 if opts.get(r'rcfile'):
494 if opts.get(r'rcfile'):
494 ui.readconfig(opts.get(b'rcfile'))
495 ui.readconfig(opts.get(b'rcfile'))
495 if args:
496 if args:
496 # simulate hgrc parsing
497 # simulate hgrc parsing
497 rcmaps = b'[keywordmaps]\n%s\n' % b'\n'.join(args)
498 rcmaps = b'[keywordmaps]\n%s\n' % b'\n'.join(args)
498 repo.vfs.write(b'hgrc', rcmaps)
499 repo.vfs.write(b'hgrc', rcmaps)
499 ui.readconfig(repo.vfs.join(b'hgrc'))
500 ui.readconfig(repo.vfs.join(b'hgrc'))
500 kwmaps = dict(ui.configitems(b'keywordmaps'))
501 kwmaps = dict(ui.configitems(b'keywordmaps'))
501 elif opts.get(r'default'):
502 elif opts.get(r'default'):
502 if svn:
503 if svn:
503 ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
504 ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
504 else:
505 else:
505 ui.status(_(b'\n\tconfiguration using default cvs keywordset\n'))
506 ui.status(_(b'\n\tconfiguration using default cvs keywordset\n'))
506 kwmaps = _defaultkwmaps(ui)
507 kwmaps = _defaultkwmaps(ui)
507 if uikwmaps:
508 if uikwmaps:
508 ui.status(_(b'\tdisabling current template maps\n'))
509 ui.status(_(b'\tdisabling current template maps\n'))
509 for k, v in kwmaps.items():
510 for k, v in kwmaps.items():
510 ui.setconfig(b'keywordmaps', k, v, b'keyword')
511 ui.setconfig(b'keywordmaps', k, v, b'keyword')
511 else:
512 else:
512 ui.status(_(b'\n\tconfiguration using current keyword template maps\n'))
513 ui.status(_(b'\n\tconfiguration using current keyword template maps\n'))
513 if uikwmaps:
514 if uikwmaps:
514 kwmaps = dict(uikwmaps)
515 kwmaps = dict(uikwmaps)
515 else:
516 else:
516 kwmaps = _defaultkwmaps(ui)
517 kwmaps = _defaultkwmaps(ui)
517
518
518 uisetup(ui)
519 uisetup(ui)
519 reposetup(ui, repo)
520 reposetup(ui, repo)
520 ui.writenoi18n(b'[extensions]\nkeyword =\n')
521 ui.writenoi18n(b'[extensions]\nkeyword =\n')
521 demoitems(b'keyword', ui.configitems(b'keyword'))
522 demoitems(b'keyword', ui.configitems(b'keyword'))
522 demoitems(b'keywordset', ui.configitems(b'keywordset'))
523 demoitems(b'keywordset', ui.configitems(b'keywordset'))
523 demoitems(b'keywordmaps', kwmaps.items())
524 demoitems(b'keywordmaps', kwmaps.items())
524 keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
525 keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
525 repo.wvfs.write(fn, keywords)
526 repo.wvfs.write(fn, keywords)
526 repo[None].add([fn])
527 repo[None].add([fn])
527 ui.note(_(b'\nkeywords written to %s:\n') % fn)
528 ui.note(_(b'\nkeywords written to %s:\n') % fn)
528 ui.note(keywords)
529 ui.note(keywords)
529 with repo.wlock():
530 with repo.wlock():
530 repo.dirstate.setbranch(b'demobranch')
531 repo.dirstate.setbranch(b'demobranch')
531 for name, cmd in ui.configitems(b'hooks'):
532 for name, cmd in ui.configitems(b'hooks'):
532 if name.split(b'.', 1)[0].find(b'commit') > -1:
533 if name.split(b'.', 1)[0].find(b'commit') > -1:
533 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
534 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
534 msg = _(b'hg keyword configuration and expansion example')
535 msg = _(b'hg keyword configuration and expansion example')
535 ui.note((b"hg ci -m '%s'\n" % msg))
536 ui.note((b"hg ci -m '%s'\n" % msg))
536 repo.commit(text=msg)
537 repo.commit(text=msg)
537 ui.status(_(b'\n\tkeywords expanded\n'))
538 ui.status(_(b'\n\tkeywords expanded\n'))
538 ui.write(repo.wread(fn))
539 ui.write(repo.wread(fn))
539 repo.wvfs.rmtree(repo.root)
540 repo.wvfs.rmtree(repo.root)
540
541
541
542
542 @command(
543 @command(
543 b'kwexpand',
544 b'kwexpand',
544 cmdutil.walkopts,
545 cmdutil.walkopts,
545 _(b'hg kwexpand [OPTION]... [FILE]...'),
546 _(b'hg kwexpand [OPTION]... [FILE]...'),
546 inferrepo=True,
547 inferrepo=True,
547 )
548 )
548 def expand(ui, repo, *pats, **opts):
549 def expand(ui, repo, *pats, **opts):
549 '''expand keywords in the working directory
550 '''expand keywords in the working directory
550
551
551 Run after (re)enabling keyword expansion.
552 Run after (re)enabling keyword expansion.
552
553
553 kwexpand refuses to run if given files contain local changes.
554 kwexpand refuses to run if given files contain local changes.
554 '''
555 '''
555 # 3rd argument sets expansion to True
556 # 3rd argument sets expansion to True
556 _kwfwrite(ui, repo, True, *pats, **opts)
557 _kwfwrite(ui, repo, True, *pats, **opts)
557
558
558
559
559 @command(
560 @command(
560 b'kwfiles',
561 b'kwfiles',
561 [
562 [
562 (b'A', b'all', None, _(b'show keyword status flags of all files')),
563 (b'A', b'all', None, _(b'show keyword status flags of all files')),
563 (b'i', b'ignore', None, _(b'show files excluded from expansion')),
564 (b'i', b'ignore', None, _(b'show files excluded from expansion')),
564 (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
565 (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
565 ]
566 ]
566 + cmdutil.walkopts,
567 + cmdutil.walkopts,
567 _(b'hg kwfiles [OPTION]... [FILE]...'),
568 _(b'hg kwfiles [OPTION]... [FILE]...'),
568 inferrepo=True,
569 inferrepo=True,
569 )
570 )
570 def files(ui, repo, *pats, **opts):
571 def files(ui, repo, *pats, **opts):
571 '''show files configured for keyword expansion
572 '''show files configured for keyword expansion
572
573
573 List which files in the working directory are matched by the
574 List which files in the working directory are matched by the
574 [keyword] configuration patterns.
575 [keyword] configuration patterns.
575
576
576 Useful to prevent inadvertent keyword expansion and to speed up
577 Useful to prevent inadvertent keyword expansion and to speed up
577 execution by including only files that are actual candidates for
578 execution by including only files that are actual candidates for
578 expansion.
579 expansion.
579
580
580 See :hg:`help keyword` on how to construct patterns both for
581 See :hg:`help keyword` on how to construct patterns both for
581 inclusion and exclusion of files.
582 inclusion and exclusion of files.
582
583
583 With -A/--all and -v/--verbose the codes used to show the status
584 With -A/--all and -v/--verbose the codes used to show the status
584 of files are::
585 of files are::
585
586
586 K = keyword expansion candidate
587 K = keyword expansion candidate
587 k = keyword expansion candidate (not tracked)
588 k = keyword expansion candidate (not tracked)
588 I = ignored
589 I = ignored
589 i = ignored (not tracked)
590 i = ignored (not tracked)
590 '''
591 '''
591 kwt = getattr(repo, '_keywordkwt', None)
592 kwt = getattr(repo, '_keywordkwt', None)
592 wctx = repo[None]
593 wctx = repo[None]
593 status = _status(ui, repo, wctx, kwt, *pats, **opts)
594 status = _status(ui, repo, wctx, kwt, *pats, **opts)
594 if pats:
595 if pats:
595 cwd = repo.getcwd()
596 cwd = repo.getcwd()
596 else:
597 else:
597 cwd = b''
598 cwd = b''
598 files = []
599 files = []
599 opts = pycompat.byteskwargs(opts)
600 opts = pycompat.byteskwargs(opts)
600 if not opts.get(b'unknown') or opts.get(b'all'):
601 if not opts.get(b'unknown') or opts.get(b'all'):
601 files = sorted(status.modified + status.added + status.clean)
602 files = sorted(status.modified + status.added + status.clean)
602 kwfiles = kwt.iskwfile(files, wctx)
603 kwfiles = kwt.iskwfile(files, wctx)
603 kwdeleted = kwt.iskwfile(status.deleted, wctx)
604 kwdeleted = kwt.iskwfile(status.deleted, wctx)
604 kwunknown = kwt.iskwfile(status.unknown, wctx)
605 kwunknown = kwt.iskwfile(status.unknown, wctx)
605 if not opts.get(b'ignore') or opts.get(b'all'):
606 if not opts.get(b'ignore') or opts.get(b'all'):
606 showfiles = kwfiles, kwdeleted, kwunknown
607 showfiles = kwfiles, kwdeleted, kwunknown
607 else:
608 else:
608 showfiles = [], [], []
609 showfiles = [], [], []
609 if opts.get(b'all') or opts.get(b'ignore'):
610 if opts.get(b'all') or opts.get(b'ignore'):
610 showfiles += (
611 showfiles += (
611 [f for f in files if f not in kwfiles],
612 [f for f in files if f not in kwfiles],
612 [f for f in status.unknown if f not in kwunknown],
613 [f for f in status.unknown if f not in kwunknown],
613 )
614 )
614 kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
615 kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
615 kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
616 kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
616 fm = ui.formatter(b'kwfiles', opts)
617 fm = ui.formatter(b'kwfiles', opts)
617 fmt = b'%.0s%s\n'
618 fmt = b'%.0s%s\n'
618 if opts.get(b'all') or ui.verbose:
619 if opts.get(b'all') or ui.verbose:
619 fmt = b'%s %s\n'
620 fmt = b'%s %s\n'
620 for kwstate, char, filenames in kwstates:
621 for kwstate, char, filenames in kwstates:
621 label = b'kwfiles.' + kwstate
622 label = b'kwfiles.' + kwstate
622 for f in filenames:
623 for f in filenames:
623 fm.startitem()
624 fm.startitem()
624 fm.data(kwstatus=char, path=f)
625 fm.data(kwstatus=char, path=f)
625 fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
626 fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
626 fm.end()
627 fm.end()
627
628
628
629
629 @command(
630 @command(
630 b'kwshrink',
631 b'kwshrink',
631 cmdutil.walkopts,
632 cmdutil.walkopts,
632 _(b'hg kwshrink [OPTION]... [FILE]...'),
633 _(b'hg kwshrink [OPTION]... [FILE]...'),
633 inferrepo=True,
634 inferrepo=True,
634 )
635 )
635 def shrink(ui, repo, *pats, **opts):
636 def shrink(ui, repo, *pats, **opts):
636 '''revert expanded keywords in the working directory
637 '''revert expanded keywords in the working directory
637
638
638 Must be run before changing/disabling active keywords.
639 Must be run before changing/disabling active keywords.
639
640
640 kwshrink refuses to run if given files contain local changes.
641 kwshrink refuses to run if given files contain local changes.
641 '''
642 '''
642 # 3rd argument sets expansion to False
643 # 3rd argument sets expansion to False
643 _kwfwrite(ui, repo, False, *pats, **opts)
644 _kwfwrite(ui, repo, False, *pats, **opts)
644
645
645
646
646 # monkeypatches
647 # monkeypatches
647
648
648
649
649 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
650 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
650 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
651 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
651 rejects or conflicts due to expanded keywords in working dir.'''
652 rejects or conflicts due to expanded keywords in working dir.'''
652 orig(self, ui, gp, backend, store, eolmode)
653 orig(self, ui, gp, backend, store, eolmode)
653 kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
654 kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
654 if kwt:
655 if kwt:
655 # shrink keywords read from working dir
656 # shrink keywords read from working dir
656 self.lines = kwt.shrinklines(self.fname, self.lines)
657 self.lines = kwt.shrinklines(self.fname, self.lines)
657
658
658
659
659 def kwdiff(orig, repo, *args, **kwargs):
660 def kwdiff(orig, repo, *args, **kwargs):
660 '''Monkeypatch patch.diff to avoid expansion.'''
661 '''Monkeypatch patch.diff to avoid expansion.'''
661 kwt = getattr(repo, '_keywordkwt', None)
662 kwt = getattr(repo, '_keywordkwt', None)
662 if kwt:
663 if kwt:
663 restrict = kwt.restrict
664 restrict = kwt.restrict
664 kwt.restrict = True
665 kwt.restrict = True
665 try:
666 try:
666 for chunk in orig(repo, *args, **kwargs):
667 for chunk in orig(repo, *args, **kwargs):
667 yield chunk
668 yield chunk
668 finally:
669 finally:
669 if kwt:
670 if kwt:
670 kwt.restrict = restrict
671 kwt.restrict = restrict
671
672
672
673
673 def kwweb_skip(orig, web):
674 def kwweb_skip(orig, web):
674 '''Wraps webcommands.x turning off keyword expansion.'''
675 '''Wraps webcommands.x turning off keyword expansion.'''
675 kwt = getattr(web.repo, '_keywordkwt', None)
676 kwt = getattr(web.repo, '_keywordkwt', None)
676 if kwt:
677 if kwt:
677 origmatch = kwt.match
678 origmatch = kwt.match
678 kwt.match = util.never
679 kwt.match = util.never
679 try:
680 try:
680 for chunk in orig(web):
681 for chunk in orig(web):
681 yield chunk
682 yield chunk
682 finally:
683 finally:
683 if kwt:
684 if kwt:
684 kwt.match = origmatch
685 kwt.match = origmatch
685
686
686
687
687 def kw_amend(orig, ui, repo, old, extra, pats, opts):
688 def kw_amend(orig, ui, repo, old, extra, pats, opts):
688 '''Wraps cmdutil.amend expanding keywords after amend.'''
689 '''Wraps cmdutil.amend expanding keywords after amend.'''
689 kwt = getattr(repo, '_keywordkwt', None)
690 kwt = getattr(repo, '_keywordkwt', None)
690 if kwt is None:
691 if kwt is None:
691 return orig(ui, repo, old, extra, pats, opts)
692 return orig(ui, repo, old, extra, pats, opts)
692 with repo.wlock():
693 with repo.wlock():
693 kwt.postcommit = True
694 kwt.postcommit = True
694 newid = orig(ui, repo, old, extra, pats, opts)
695 newid = orig(ui, repo, old, extra, pats, opts)
695 if newid != old.node():
696 if newid != old.node():
696 ctx = repo[newid]
697 ctx = repo[newid]
697 kwt.restrict = True
698 kwt.restrict = True
698 kwt.overwrite(ctx, ctx.files(), False, True)
699 kwt.overwrite(ctx, ctx.files(), False, True)
699 kwt.restrict = False
700 kwt.restrict = False
700 return newid
701 return newid
701
702
702
703
703 def kw_copy(orig, ui, repo, pats, opts, rename=False):
704 def kw_copy(orig, ui, repo, pats, opts, rename=False):
704 '''Wraps cmdutil.copy so that copy/rename destinations do not
705 '''Wraps cmdutil.copy so that copy/rename destinations do not
705 contain expanded keywords.
706 contain expanded keywords.
706 Note that the source of a regular file destination may also be a
707 Note that the source of a regular file destination may also be a
707 symlink:
708 symlink:
708 hg cp sym x -> x is symlink
709 hg cp sym x -> x is symlink
709 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
710 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
710 For the latter we have to follow the symlink to find out whether its
711 For the latter we have to follow the symlink to find out whether its
711 target is configured for expansion and we therefore must unexpand the
712 target is configured for expansion and we therefore must unexpand the
712 keywords in the destination.'''
713 keywords in the destination.'''
713 kwt = getattr(repo, '_keywordkwt', None)
714 kwt = getattr(repo, '_keywordkwt', None)
714 if kwt is None:
715 if kwt is None:
715 return orig(ui, repo, pats, opts, rename)
716 return orig(ui, repo, pats, opts, rename)
716 with repo.wlock():
717 with repo.wlock():
717 orig(ui, repo, pats, opts, rename)
718 orig(ui, repo, pats, opts, rename)
718 if opts.get(b'dry_run'):
719 if opts.get(b'dry_run'):
719 return
720 return
720 wctx = repo[None]
721 wctx = repo[None]
721 cwd = repo.getcwd()
722 cwd = repo.getcwd()
722
723
723 def haskwsource(dest):
724 def haskwsource(dest):
724 '''Returns true if dest is a regular file and configured for
725 '''Returns true if dest is a regular file and configured for
725 expansion or a symlink which points to a file configured for
726 expansion or a symlink which points to a file configured for
726 expansion. '''
727 expansion. '''
727 source = repo.dirstate.copied(dest)
728 source = repo.dirstate.copied(dest)
728 if b'l' in wctx.flags(source):
729 if b'l' in wctx.flags(source):
729 source = pathutil.canonpath(
730 source = pathutil.canonpath(
730 repo.root, cwd, os.path.realpath(source)
731 repo.root, cwd, os.path.realpath(source)
731 )
732 )
732 return kwt.match(source)
733 return kwt.match(source)
733
734
734 candidates = [
735 candidates = [
735 f
736 f
736 for f in repo.dirstate.copies()
737 for f in repo.dirstate.copies()
737 if b'l' not in wctx.flags(f) and haskwsource(f)
738 if b'l' not in wctx.flags(f) and haskwsource(f)
738 ]
739 ]
739 kwt.overwrite(wctx, candidates, False, False)
740 kwt.overwrite(wctx, candidates, False, False)
740
741
741
742
742 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
743 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
743 '''Wraps record.dorecord expanding keywords after recording.'''
744 '''Wraps record.dorecord expanding keywords after recording.'''
744 kwt = getattr(repo, '_keywordkwt', None)
745 kwt = getattr(repo, '_keywordkwt', None)
745 if kwt is None:
746 if kwt is None:
746 return orig(ui, repo, commitfunc, *pats, **opts)
747 return orig(ui, repo, commitfunc, *pats, **opts)
747 with repo.wlock():
748 with repo.wlock():
748 # record returns 0 even when nothing has changed
749 # record returns 0 even when nothing has changed
749 # therefore compare nodes before and after
750 # therefore compare nodes before and after
750 kwt.postcommit = True
751 kwt.postcommit = True
751 ctx = repo[b'.']
752 ctx = repo[b'.']
752 wstatus = ctx.status()
753 wstatus = ctx.status()
753 ret = orig(ui, repo, commitfunc, *pats, **opts)
754 ret = orig(ui, repo, commitfunc, *pats, **opts)
754 recctx = repo[b'.']
755 recctx = repo[b'.']
755 if ctx != recctx:
756 if ctx != recctx:
756 modified, added = _preselect(wstatus, recctx.files())
757 modified, added = _preselect(wstatus, recctx.files())
757 kwt.restrict = False
758 kwt.restrict = False
758 kwt.overwrite(recctx, modified, False, True)
759 kwt.overwrite(recctx, modified, False, True)
759 kwt.overwrite(recctx, added, False, True, True)
760 kwt.overwrite(recctx, added, False, True, True)
760 kwt.restrict = True
761 kwt.restrict = True
761 return ret
762 return ret
762
763
763
764
764 def kwfilectx_cmp(orig, self, fctx):
765 def kwfilectx_cmp(orig, self, fctx):
765 if fctx._customcmp:
766 if fctx._customcmp:
766 return fctx.cmp(self)
767 return fctx.cmp(self)
767 kwt = getattr(self._repo, '_keywordkwt', None)
768 kwt = getattr(self._repo, '_keywordkwt', None)
768 if kwt is None:
769 if kwt is None:
769 return orig(self, fctx)
770 return orig(self, fctx)
770 # keyword affects data size, comparing wdir and filelog size does
771 # keyword affects data size, comparing wdir and filelog size does
771 # not make sense
772 # not make sense
772 if (
773 if (
773 fctx._filenode is None
774 fctx._filenode is None
774 and (
775 and (
775 self._repo._encodefilterpats
776 self._repo._encodefilterpats
776 or kwt.match(fctx.path())
777 or kwt.match(fctx.path())
777 and b'l' not in fctx.flags()
778 and b'l' not in fctx.flags()
778 or self.size() - 4 == fctx.size()
779 or self.size() - 4 == fctx.size()
779 )
780 )
780 or self.size() == fctx.size()
781 or self.size() == fctx.size()
781 ):
782 ):
782 return self._filelog.cmp(self._filenode, fctx.data())
783 return self._filelog.cmp(self._filenode, fctx.data())
783 return True
784 return True
784
785
785
786
786 def uisetup(ui):
787 def uisetup(ui):
787 ''' Monkeypatches dispatch._parse to retrieve user command.
788 ''' Monkeypatches dispatch._parse to retrieve user command.
788 Overrides file method to return kwfilelog instead of filelog
789 Overrides file method to return kwfilelog instead of filelog
789 if file matches user configuration.
790 if file matches user configuration.
790 Wraps commit to overwrite configured files with updated
791 Wraps commit to overwrite configured files with updated
791 keyword substitutions.
792 keyword substitutions.
792 Monkeypatches patch and webcommands.'''
793 Monkeypatches patch and webcommands.'''
793
794
794 def kwdispatch_parse(orig, ui, args):
795 def kwdispatch_parse(orig, ui, args):
795 '''Monkeypatch dispatch._parse to obtain running hg command.'''
796 '''Monkeypatch dispatch._parse to obtain running hg command.'''
796 cmd, func, args, options, cmdoptions = orig(ui, args)
797 cmd, func, args, options, cmdoptions = orig(ui, args)
797 kwtools[b'hgcmd'] = cmd
798 kwtools[b'hgcmd'] = cmd
798 return cmd, func, args, options, cmdoptions
799 return cmd, func, args, options, cmdoptions
799
800
800 extensions.wrapfunction(dispatch, b'_parse', kwdispatch_parse)
801 extensions.wrapfunction(dispatch, b'_parse', kwdispatch_parse)
801
802
802 extensions.wrapfunction(context.filectx, b'cmp', kwfilectx_cmp)
803 extensions.wrapfunction(context.filectx, b'cmp', kwfilectx_cmp)
803 extensions.wrapfunction(patch.patchfile, b'__init__', kwpatchfile_init)
804 extensions.wrapfunction(patch.patchfile, b'__init__', kwpatchfile_init)
804 extensions.wrapfunction(patch, b'diff', kwdiff)
805 extensions.wrapfunction(patch, b'diff', kwdiff)
805 extensions.wrapfunction(cmdutil, b'amend', kw_amend)
806 extensions.wrapfunction(cmdutil, b'amend', kw_amend)
806 extensions.wrapfunction(cmdutil, b'copy', kw_copy)
807 extensions.wrapfunction(cmdutil, b'copy', kw_copy)
807 extensions.wrapfunction(cmdutil, b'dorecord', kw_dorecord)
808 extensions.wrapfunction(cmdutil, b'dorecord', kw_dorecord)
808 for c in nokwwebcommands.split():
809 for c in nokwwebcommands.split():
809 extensions.wrapfunction(webcommands, c, kwweb_skip)
810 extensions.wrapfunction(webcommands, c, kwweb_skip)
810
811
811
812
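All of the hooks registered above share one wrapper shape: the original callable arrives as the wrapper's first argument, and the wrapper decides when to call it (or drain its generator). Stripped of Mercurial, the mechanism is roughly the following — a simplified stand-in for extensions.wrapfunction, not its real implementation:

    def wrapfunction(container, name, wrapper):
        orig = getattr(container, name)
        setattr(container, name, lambda *a, **kw: wrapper(orig, *a, **kw))

    class API:
        @staticmethod
        def greet(who):
            return 'hello %s' % who

    def loudgreet(orig, who):
        return orig(who).upper()

    wrapfunction(API, 'greet', loudgreet)
    print(API.greet('world'))  # HELLO WORLD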
812 def reposetup(ui, repo):
813 def reposetup(ui, repo):
813 '''Sets up repo as kwrepo for keyword substitution.'''
814 '''Sets up repo as kwrepo for keyword substitution.'''
814
815
815 try:
816 try:
816 if (
817 if (
817 not repo.local()
818 not repo.local()
818 or kwtools[b'hgcmd'] in nokwcommands.split()
819 or kwtools[b'hgcmd'] in nokwcommands.split()
819 or b'.hg' in util.splitpath(repo.root)
820 or b'.hg' in util.splitpath(repo.root)
820 or repo._url.startswith(b'bundle:')
821 or repo._url.startswith(b'bundle:')
821 ):
822 ):
822 return
823 return
823 except AttributeError:
824 except AttributeError:
824 pass
825 pass
825
826
826 inc, exc = [], [b'.hg*']
827 inc, exc = [], [b'.hg*']
827 for pat, opt in ui.configitems(b'keyword'):
828 for pat, opt in ui.configitems(b'keyword'):
828 if opt != b'ignore':
829 if opt != b'ignore':
829 inc.append(pat)
830 inc.append(pat)
830 else:
831 else:
831 exc.append(pat)
832 exc.append(pat)
832 if not inc:
833 if not inc:
833 return
834 return
834
835
835 kwt = kwtemplater(ui, repo, inc, exc)
836 kwt = kwtemplater(ui, repo, inc, exc)
836
837
837 class kwrepo(repo.__class__):
838 class kwrepo(repo.__class__):
838 def file(self, f):
839 def file(self, f):
839 if f[0] == b'/':
840 if f[0] == b'/':
840 f = f[1:]
841 f = f[1:]
841 return kwfilelog(self.svfs, kwt, f)
842 return kwfilelog(self.svfs, kwt, f)
842
843
843 def wread(self, filename):
844 def wread(self, filename):
844 data = super(kwrepo, self).wread(filename)
845 data = super(kwrepo, self).wread(filename)
845 return kwt.wread(filename, data)
846 return kwt.wread(filename, data)
846
847
847 def commit(self, *args, **opts):
848 def commit(self, *args, **opts):
848 # use custom commitctx for user commands
849 # use custom commitctx for user commands
849 # other extensions can still wrap repo.commitctx directly
850 # other extensions can still wrap repo.commitctx directly
850 self.commitctx = self.kwcommitctx
851 self.commitctx = self.kwcommitctx
851 try:
852 try:
852 return super(kwrepo, self).commit(*args, **opts)
853 return super(kwrepo, self).commit(*args, **opts)
853 finally:
854 finally:
854 del self.commitctx
855 del self.commitctx
855
856
856 def kwcommitctx(self, ctx, error=False, origctx=None):
857 def kwcommitctx(self, ctx, error=False, origctx=None):
857 n = super(kwrepo, self).commitctx(ctx, error, origctx)
858 n = super(kwrepo, self).commitctx(ctx, error, origctx)
858 # no lock needed, only called from repo.commit() which already locks
859 # no lock needed, only called from repo.commit() which already locks
859 if not kwt.postcommit:
860 if not kwt.postcommit:
860 restrict = kwt.restrict
861 restrict = kwt.restrict
861 kwt.restrict = True
862 kwt.restrict = True
862 kwt.overwrite(
863 kwt.overwrite(
863 self[n], sorted(ctx.added() + ctx.modified()), False, True
864 self[n], sorted(ctx.added() + ctx.modified()), False, True
864 )
865 )
865 kwt.restrict = restrict
866 kwt.restrict = restrict
866 return n
867 return n
867
868
868 def rollback(self, dryrun=False, force=False):
869 def rollback(self, dryrun=False, force=False):
869 with self.wlock():
870 with self.wlock():
870 origrestrict = kwt.restrict
871 origrestrict = kwt.restrict
871 try:
872 try:
872 if not dryrun:
873 if not dryrun:
873 changed = self[b'.'].files()
874 changed = self[b'.'].files()
874 ret = super(kwrepo, self).rollback(dryrun, force)
875 ret = super(kwrepo, self).rollback(dryrun, force)
875 if not dryrun:
876 if not dryrun:
876 ctx = self[b'.']
877 ctx = self[b'.']
877 modified, added = _preselect(ctx.status(), changed)
878 modified, added = _preselect(ctx.status(), changed)
878 kwt.restrict = False
879 kwt.restrict = False
879 kwt.overwrite(ctx, modified, True, True)
880 kwt.overwrite(ctx, modified, True, True)
880 kwt.overwrite(ctx, added, True, False)
881 kwt.overwrite(ctx, added, True, False)
881 return ret
882 return ret
882 finally:
883 finally:
883 kwt.restrict = origrestrict
884 kwt.restrict = origrestrict
884
885
885 repo.__class__ = kwrepo
886 repo.__class__ = kwrepo
886 repo._keywordkwt = kwt
887 repo._keywordkwt = kwt
@@ -1,89 +1,89
1 # This software may be used and distributed according to the terms of the
1 # This software may be used and distributed according to the terms of the
2 # GNU General Public License version 2 or any later version.
2 # GNU General Public License version 2 or any later version.
3
3
4 from __future__ import absolute_import
4 from __future__ import absolute_import
5
5
6 import re
6 import re
7
7
8 from mercurial.i18n import _
8 from mercurial.i18n import _
9
9 from mercurial.pycompat import getattr
10 from mercurial import (
10 from mercurial import (
11 error,
11 error,
12 hg,
12 hg,
13 util,
13 util,
14 )
14 )
15
15
16 from . import (
16 from . import (
17 lfutil,
17 lfutil,
18 localstore,
18 localstore,
19 wirestore,
19 wirestore,
20 )
20 )
21
21
22 # During clone this function is passed the src's ui object
22 # During clone this function is passed the src's ui object
23 # but it needs the dest's ui object so it can read out of
23 # but it needs the dest's ui object so it can read out of
24 # the config file. Use repo.ui instead.
24 # the config file. Use repo.ui instead.
25 def openstore(repo=None, remote=None, put=False, ui=None):
25 def openstore(repo=None, remote=None, put=False, ui=None):
26 if ui is None:
26 if ui is None:
27 ui = repo.ui
27 ui = repo.ui
28
28
29 if not remote:
29 if not remote:
30 lfpullsource = getattr(repo, 'lfpullsource', None)
30 lfpullsource = getattr(repo, 'lfpullsource', None)
31 if lfpullsource:
31 if lfpullsource:
32 path = ui.expandpath(lfpullsource)
32 path = ui.expandpath(lfpullsource)
33 elif put:
33 elif put:
34 path = ui.expandpath(b'default-push', b'default')
34 path = ui.expandpath(b'default-push', b'default')
35 else:
35 else:
36 path = ui.expandpath(b'default')
36 path = ui.expandpath(b'default')
37
37
38 # ui.expandpath() leaves 'default-push' and 'default' alone if
38 # ui.expandpath() leaves 'default-push' and 'default' alone if
39 # they cannot be expanded: fallback to the empty string,
39 # they cannot be expanded: fallback to the empty string,
40 # meaning the current directory.
40 # meaning the current directory.
41 if repo is None:
41 if repo is None:
42 path = ui.expandpath(b'default')
42 path = ui.expandpath(b'default')
43 path, _branches = hg.parseurl(path)
43 path, _branches = hg.parseurl(path)
44 remote = hg.peer(repo or ui, {}, path)
44 remote = hg.peer(repo or ui, {}, path)
45 elif path == b'default-push' or path == b'default':
45 elif path == b'default-push' or path == b'default':
46 remote = repo
46 remote = repo
47 else:
47 else:
48 path, _branches = hg.parseurl(path)
48 path, _branches = hg.parseurl(path)
49 remote = hg.peer(repo or ui, {}, path)
49 remote = hg.peer(repo or ui, {}, path)
50
50
51 # The path could be a scheme so use Mercurial's normal functionality
51 # The path could be a scheme so use Mercurial's normal functionality
52 # to resolve the scheme to a repository and use its path
52 # to resolve the scheme to a repository and use its path
53 path = util.safehasattr(remote, b'url') and remote.url() or remote.path
53 path = util.safehasattr(remote, b'url') and remote.url() or remote.path
54
54
55 match = _scheme_re.match(path)
55 match = _scheme_re.match(path)
56 if not match: # regular filesystem path
56 if not match: # regular filesystem path
57 scheme = b'file'
57 scheme = b'file'
58 else:
58 else:
59 scheme = match.group(1)
59 scheme = match.group(1)
60
60
61 try:
61 try:
62 storeproviders = _storeprovider[scheme]
62 storeproviders = _storeprovider[scheme]
63 except KeyError:
63 except KeyError:
64 raise error.Abort(_(b'unsupported URL scheme %r') % scheme)
64 raise error.Abort(_(b'unsupported URL scheme %r') % scheme)
65
65
66 for classobj in storeproviders:
66 for classobj in storeproviders:
67 try:
67 try:
68 return classobj(ui, repo, remote)
68 return classobj(ui, repo, remote)
69 except lfutil.storeprotonotcapable:
69 except lfutil.storeprotonotcapable:
70 pass
70 pass
71
71
72 raise error.Abort(
72 raise error.Abort(
73 _(b'%s does not appear to be a largefile store')
73 _(b'%s does not appear to be a largefile store')
74 % util.hidepassword(path)
74 % util.hidepassword(path)
75 )
75 )
76
76
77
77
78 _storeprovider = {
78 _storeprovider = {
79 b'file': [localstore.localstore],
79 b'file': [localstore.localstore],
80 b'http': [wirestore.wirestore],
80 b'http': [wirestore.wirestore],
81 b'https': [wirestore.wirestore],
81 b'https': [wirestore.wirestore],
82 b'ssh': [wirestore.wirestore],
82 b'ssh': [wirestore.wirestore],
83 }
83 }
84
84
85 _scheme_re = re.compile(br'^([a-zA-Z0-9+-.]+)://')
85 _scheme_re = re.compile(br'^([a-zA-Z0-9+-.]+)://')
86
86
87
87
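_scheme_re only needs to split off a URL scheme; anything that does not match is treated as a plain filesystem path. For instance (illustrative paths):

    import re

    _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
    for path in ('https://hg.example.com/repo', '/srv/hg/repo'):
        m = _scheme_re.match(path)
        print(m.group(1) if m else 'file')  # https, then file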
88 def getlfile(ui, hash):
88 def getlfile(ui, hash):
89 return util.chunkbuffer(openstore(ui=ui)._get(hash))
89 return util.chunkbuffer(openstore(ui=ui)._get(hash))
@@ -1,745 +1,746
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import json
13 import json
14 import os
14 import os
15 import re
15 import re
16 import socket
16 import socket
17
17
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19 from mercurial.pycompat import getattr
19
20
20 from mercurial import (
21 from mercurial import (
21 encoding,
22 encoding,
22 error,
23 error,
23 node,
24 node,
24 pathutil,
25 pathutil,
25 pycompat,
26 pycompat,
26 url as urlmod,
27 url as urlmod,
27 util,
28 util,
28 vfs as vfsmod,
29 vfs as vfsmod,
29 worker,
30 worker,
30 )
31 )
31
32
32 from mercurial.utils import stringutil
33 from mercurial.utils import stringutil
33
34
34 from ..largefiles import lfutil
35 from ..largefiles import lfutil
35
36
36 # 64 bytes for SHA256
37 # 64 bytes for SHA256
37 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
38 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
38
39
39
40
40 class lfsvfs(vfsmod.vfs):
41 class lfsvfs(vfsmod.vfs):
41 def join(self, path):
42 def join(self, path):
42 """split the path at first two characters, like: XX/XXXXX..."""
43 """split the path at first two characters, like: XX/XXXXX..."""
43 if not _lfsre.match(path):
44 if not _lfsre.match(path):
44 raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
45 raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
45 return super(lfsvfs, self).join(path[0:2], path[2:])
46 return super(lfsvfs, self).join(path[0:2], path[2:])
46
47
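The two-level fan-out keeps any one directory small: a 64-character hex oid becomes a two-character directory plus the remaining 62 characters. Standalone:

    import hashlib

    oid = hashlib.sha256(b'some blob').hexdigest()  # 64 hex chars
    print('%s/%s' % (oid[:2], oid[2:]))             # XX/ then the remaining 62 chars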
47 def walk(self, path=None, onerror=None):
48 def walk(self, path=None, onerror=None):
48 """Yield (dirpath, [], oids) tuple for blobs under path
49 """Yield (dirpath, [], oids) tuple for blobs under path
49
50
50 Oids only exist in the root of this vfs, so dirpath is always ''.
51 Oids only exist in the root of this vfs, so dirpath is always ''.
51 """
52 """
52 root = os.path.normpath(self.base)
53 root = os.path.normpath(self.base)
53 # when dirpath == root, dirpath[prefixlen:] becomes empty
54 # when dirpath == root, dirpath[prefixlen:] becomes empty
54 # because len(dirpath) < prefixlen.
55 # because len(dirpath) < prefixlen.
55 prefixlen = len(pathutil.normasprefix(root))
56 prefixlen = len(pathutil.normasprefix(root))
56 oids = []
57 oids = []
57
58
58 for dirpath, dirs, files in os.walk(
59 for dirpath, dirs, files in os.walk(
59 self.reljoin(self.base, path or b''), onerror=onerror
60 self.reljoin(self.base, path or b''), onerror=onerror
60 ):
61 ):
61 dirpath = dirpath[prefixlen:]
62 dirpath = dirpath[prefixlen:]
62
63
63 # Silently skip unexpected files and directories
64 # Silently skip unexpected files and directories
64 if len(dirpath) == 2:
65 if len(dirpath) == 2:
65 oids.extend(
66 oids.extend(
66 [dirpath + f for f in files if _lfsre.match(dirpath + f)]
67 [dirpath + f for f in files if _lfsre.match(dirpath + f)]
67 )
68 )
68
69
69 yield (b'', [], oids)
70 yield (b'', [], oids)
70
71
71
72
72 class nullvfs(lfsvfs):
73 class nullvfs(lfsvfs):
73 def __init__(self):
74 def __init__(self):
74 pass
75 pass
75
76
76 def exists(self, oid):
77 def exists(self, oid):
77 return False
78 return False
78
79
79 def read(self, oid):
80 def read(self, oid):
80 # store.read() calls into here if the blob doesn't exist in its
81 # store.read() calls into here if the blob doesn't exist in its
81 # self.vfs. Raise the same error as a normal vfs when asked to read a
82 # self.vfs. Raise the same error as a normal vfs when asked to read a
82 # file that doesn't exist. The only difference is the full file path
83 # file that doesn't exist. The only difference is the full file path
83 # isn't available in the error.
84 # isn't available in the error.
84 raise IOError(
85 raise IOError(
85 errno.ENOENT,
86 errno.ENOENT,
86 pycompat.sysstr(b'%s: No such file or directory' % oid),
87 pycompat.sysstr(b'%s: No such file or directory' % oid),
87 )
88 )
88
89
89 def walk(self, path=None, onerror=None):
90 def walk(self, path=None, onerror=None):
90 return (b'', [], [])
91 return (b'', [], [])
91
92
92 def write(self, oid, data):
93 def write(self, oid, data):
93 pass
94 pass
94
95
95
96
96 class filewithprogress(object):
97 class filewithprogress(object):
97 """a file-like object that supports __len__ and read.
98 """a file-like object that supports __len__ and read.
98
99
99 Useful to provide progress information for how many bytes are read.
100 Useful to provide progress information for how many bytes are read.
100 """
101 """
101
102
102 def __init__(self, fp, callback):
103 def __init__(self, fp, callback):
103 self._fp = fp
104 self._fp = fp
104 self._callback = callback # func(readsize)
105 self._callback = callback # func(readsize)
105 fp.seek(0, os.SEEK_END)
106 fp.seek(0, os.SEEK_END)
106 self._len = fp.tell()
107 self._len = fp.tell()
107 fp.seek(0)
108 fp.seek(0)
108
109
109 def __len__(self):
110 def __len__(self):
110 return self._len
111 return self._len
111
112
112 def read(self, size):
113 def read(self, size):
113 if self._fp is None:
114 if self._fp is None:
114 return b''
115 return b''
115 data = self._fp.read(size)
116 data = self._fp.read(size)
116 if data:
117 if data:
117 if self._callback:
118 if self._callback:
118 self._callback(len(data))
119 self._callback(len(data))
119 else:
120 else:
120 self._fp.close()
121 self._fp.close()
121 self._fp = None
122 self._fp = None
122 return data
123 return data
123
124
124
125
125 class local(object):
126 class local(object):
126 """Local blobstore for large file contents.
127 """Local blobstore for large file contents.
127
128
128 This blobstore is used both as a cache and as a staging area for large blobs
129 This blobstore is used both as a cache and as a staging area for large blobs
129 to be uploaded to the remote blobstore.
130 to be uploaded to the remote blobstore.
130 """
131 """
131
132
132 def __init__(self, repo):
133 def __init__(self, repo):
133 fullpath = repo.svfs.join(b'lfs/objects')
134 fullpath = repo.svfs.join(b'lfs/objects')
134 self.vfs = lfsvfs(fullpath)
135 self.vfs = lfsvfs(fullpath)
135
136
136 if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
137 if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
137 self.cachevfs = nullvfs()
138 self.cachevfs = nullvfs()
138 else:
139 else:
139 usercache = lfutil._usercachedir(repo.ui, b'lfs')
140 usercache = lfutil._usercachedir(repo.ui, b'lfs')
140 self.cachevfs = lfsvfs(usercache)
141 self.cachevfs = lfsvfs(usercache)
141 self.ui = repo.ui
142 self.ui = repo.ui
142
143
143 def open(self, oid):
144 def open(self, oid):
144 """Open a read-only file descriptor to the named blob, in either the
145 """Open a read-only file descriptor to the named blob, in either the
145 usercache or the local store."""
146 usercache or the local store."""
146 # The usercache is the most likely place to hold the file. Commit will
147 # The usercache is the most likely place to hold the file. Commit will
147 # write to both it and the local store, as will anything that downloads
148 # write to both it and the local store, as will anything that downloads
148 # the blobs. However, things like clone without an update won't
149 # the blobs. However, things like clone without an update won't
149 # populate the local store. For an init + push of a local clone,
150 # populate the local store. For an init + push of a local clone,
150 # the usercache is the only place it _could_ be. If not present, the
151 # the usercache is the only place it _could_ be. If not present, the
151 # missing file message here will indicate the local repo, not the usercache.
152 # missing file message here will indicate the local repo, not the usercache.
152 if self.cachevfs.exists(oid):
153 if self.cachevfs.exists(oid):
153 return self.cachevfs(oid, b'rb')
154 return self.cachevfs(oid, b'rb')
154
155
155 return self.vfs(oid, b'rb')
156 return self.vfs(oid, b'rb')
156
157
157 def download(self, oid, src):
158 def download(self, oid, src):
158 """Read the blob from the remote source in chunks, verify the content,
159 """Read the blob from the remote source in chunks, verify the content,
159 and write to this local blobstore."""
160 and write to this local blobstore."""
160 sha256 = hashlib.sha256()
161 sha256 = hashlib.sha256()
161
162
162 with self.vfs(oid, b'wb', atomictemp=True) as fp:
163 with self.vfs(oid, b'wb', atomictemp=True) as fp:
163 for chunk in util.filechunkiter(src, size=1048576):
164 for chunk in util.filechunkiter(src, size=1048576):
164 fp.write(chunk)
165 fp.write(chunk)
165 sha256.update(chunk)
166 sha256.update(chunk)
166
167
167 realoid = node.hex(sha256.digest())
168 realoid = node.hex(sha256.digest())
168 if realoid != oid:
169 if realoid != oid:
169 raise LfsCorruptionError(
170 raise LfsCorruptionError(
170 _(b'corrupt remote lfs object: %s') % oid
171 _(b'corrupt remote lfs object: %s') % oid
171 )
172 )
172
173
173 self._linktousercache(oid)
174 self._linktousercache(oid)
174
175
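download() folds the SHA-256 in chunk by chunk, so a corrupt blob is rejected without ever buffering the whole object in memory. The same pattern in isolation, a sketch assuming nothing beyond the standard library:

    import hashlib
    import io

    def streamverify(oid, src, dst, chunksize=1048576):
        # Copy src to dst in chunks, hashing as we go; refuse a mismatch.
        sha256 = hashlib.sha256()
        for chunk in iter(lambda: src.read(chunksize), b''):
            dst.write(chunk)
            sha256.update(chunk)
        if sha256.hexdigest() != oid:
            raise ValueError('corrupt remote lfs object: %s' % oid)

    blob = b'payload' * 1000
    out = io.BytesIO()
    streamverify(hashlib.sha256(blob).hexdigest(), io.BytesIO(blob), out,
                 chunksize=4096)
    assert out.getvalue() == blob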
175 def write(self, oid, data):
176 def write(self, oid, data):
176 """Write blob to local blobstore.
177 """Write blob to local blobstore.
177
178
178 This should only be called from the filelog during a commit or similar.
179 This should only be called from the filelog during a commit or similar.
179 As such, there is no need to verify the data. Imports from a remote
180 As such, there is no need to verify the data. Imports from a remote
180 store must use ``download()`` instead."""
181 store must use ``download()`` instead."""
181 with self.vfs(oid, b'wb', atomictemp=True) as fp:
182 with self.vfs(oid, b'wb', atomictemp=True) as fp:
182 fp.write(data)
183 fp.write(data)
183
184
184 self._linktousercache(oid)
185 self._linktousercache(oid)
185
186
186 def linkfromusercache(self, oid):
187 def linkfromusercache(self, oid):
187 """Link blobs found in the user cache into this store.
188 """Link blobs found in the user cache into this store.
188
189
189 The server module needs to do this when it lets the client know not to
190 The server module needs to do this when it lets the client know not to
190 upload the blob, to ensure it is always available in this store.
191 upload the blob, to ensure it is always available in this store.
191 Normally this is done implicitly when the client reads or writes the
192 Normally this is done implicitly when the client reads or writes the
192 blob, but that doesn't happen when the server tells the client that it
193 blob, but that doesn't happen when the server tells the client that it
193 already has the blob.
194 already has the blob.
194 """
195 """
195 if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
196 if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
196 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
197 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
197 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
198 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
198
199
199 def _linktousercache(self, oid):
200 def _linktousercache(self, oid):
200 # XXX: should we verify the content of the cache, and hardlink back to
201 # XXX: should we verify the content of the cache, and hardlink back to
201 # the local store on success, but truncate, write and link on failure?
202 # the local store on success, but truncate, write and link on failure?
202 if not self.cachevfs.exists(oid) and not isinstance(
203 if not self.cachevfs.exists(oid) and not isinstance(
203 self.cachevfs, nullvfs
204 self.cachevfs, nullvfs
204 ):
205 ):
205 self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
206 self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
206 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
207 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
207
208
208 def read(self, oid, verify=True):
209 def read(self, oid, verify=True):
209 """Read blob from local blobstore."""
210 """Read blob from local blobstore."""
210 if not self.vfs.exists(oid):
211 if not self.vfs.exists(oid):
211 blob = self._read(self.cachevfs, oid, verify)
212 blob = self._read(self.cachevfs, oid, verify)
212
213
213 # Even if revlog will verify the content, it needs to be verified
214 # Even if revlog will verify the content, it needs to be verified
214 # now before making the hardlink to avoid propagating corrupt blobs.
215 # now before making the hardlink to avoid propagating corrupt blobs.
215 # Don't abort if corruption is detected, because `hg verify` will
216 # Don't abort if corruption is detected, because `hg verify` will
216 # give more useful info about the corruption; simply don't add the
217 # give more useful info about the corruption; simply don't add the
217 # hardlink.
218 # hardlink.
218 if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
219 if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
219 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
220 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
220 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
221 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
221 else:
222 else:
222 self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
223 self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
223 blob = self._read(self.vfs, oid, verify)
224 blob = self._read(self.vfs, oid, verify)
224 return blob
225 return blob
225
226
226 def _read(self, vfs, oid, verify):
227 def _read(self, vfs, oid, verify):
227 """Read blob (after verifying) from the given store"""
228 """Read blob (after verifying) from the given store"""
228 blob = vfs.read(oid)
229 blob = vfs.read(oid)
229 if verify:
230 if verify:
230 _verify(oid, blob)
231 _verify(oid, blob)
231 return blob
232 return blob
232
233
233 def verify(self, oid):
234 def verify(self, oid):
234 """Indicate whether or not the hash of the underlying file matches its
235 """Indicate whether or not the hash of the underlying file matches its
235 name."""
236 name."""
236 sha256 = hashlib.sha256()
237 sha256 = hashlib.sha256()
237
238
238 with self.open(oid) as fp:
239 with self.open(oid) as fp:
239 for chunk in util.filechunkiter(fp, size=1048576):
240 for chunk in util.filechunkiter(fp, size=1048576):
240 sha256.update(chunk)
241 sha256.update(chunk)
241
242
242 return oid == node.hex(sha256.digest())
243 return oid == node.hex(sha256.digest())
243
244
244 def has(self, oid):
245 def has(self, oid):
245 """Returns True if the local blobstore contains the requested blob,
246 """Returns True if the local blobstore contains the requested blob,
246 False otherwise."""
247 False otherwise."""
247 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
248 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
248
249
249
250
250 def _urlerrorreason(urlerror):
251 def _urlerrorreason(urlerror):
251 '''Create a friendly message for the given URLError to be used in an
252 '''Create a friendly message for the given URLError to be used in an
252 LfsRemoteError message.
253 LfsRemoteError message.
253 '''
254 '''
254 inst = urlerror
255 inst = urlerror
255
256
256 if isinstance(urlerror.reason, Exception):
257 if isinstance(urlerror.reason, Exception):
257 inst = urlerror.reason
258 inst = urlerror.reason
258
259
259 if util.safehasattr(inst, b'reason'):
260 if util.safehasattr(inst, b'reason'):
260 try: # usually it is in the form (errno, strerror)
261 try: # usually it is in the form (errno, strerror)
261 reason = inst.reason.args[1]
262 reason = inst.reason.args[1]
262 except (AttributeError, IndexError):
263 except (AttributeError, IndexError):
263 # it might be anything, for example a string
264 # it might be anything, for example a string
264 reason = inst.reason
265 reason = inst.reason
265 if isinstance(reason, pycompat.unicode):
266 if isinstance(reason, pycompat.unicode):
266 # SSLError of Python 2.7.9 contains a unicode
267 # SSLError of Python 2.7.9 contains a unicode
267 reason = encoding.unitolocal(reason)
268 reason = encoding.unitolocal(reason)
268 return reason
269 return reason
269 elif getattr(inst, "strerror", None):
270 elif getattr(inst, "strerror", None):
270 return encoding.strtolocal(inst.strerror)
271 return encoding.strtolocal(inst.strerror)
271 else:
272 else:
272 return stringutil.forcebytestr(urlerror)
273 return stringutil.forcebytestr(urlerror)
273
274
274
275
275 class lfsauthhandler(util.urlreq.basehandler):
276 class lfsauthhandler(util.urlreq.basehandler):
276 handler_order = 480 # Before HTTPDigestAuthHandler (== 490)
277 handler_order = 480 # Before HTTPDigestAuthHandler (== 490)
277
278
278 def http_error_401(self, req, fp, code, msg, headers):
279 def http_error_401(self, req, fp, code, msg, headers):
279 """Enforces that any authentication performed is HTTP Basic
280 """Enforces that any authentication performed is HTTP Basic
280 Authentication. No authentication is also acceptable.
281 Authentication. No authentication is also acceptable.
281 """
282 """
282 authreq = headers.get(r'www-authenticate', None)
283 authreq = headers.get(r'www-authenticate', None)
283 if authreq:
284 if authreq:
284 scheme = authreq.split()[0]
285 scheme = authreq.split()[0]
285
286
286 if scheme.lower() != r'basic':
287 if scheme.lower() != r'basic':
287 msg = _(b'the server must support Basic Authentication')
288 msg = _(b'the server must support Basic Authentication')
288 raise util.urlerr.httperror(
289 raise util.urlerr.httperror(
289 req.get_full_url(),
290 req.get_full_url(),
290 code,
291 code,
291 encoding.strfromlocal(msg),
292 encoding.strfromlocal(msg),
292 headers,
293 headers,
293 fp,
294 fp,
294 )
295 )
295 return None
296 return None
296
297
297
298
298 class _gitlfsremote(object):
299 class _gitlfsremote(object):
299 def __init__(self, repo, url):
300 def __init__(self, repo, url):
300 ui = repo.ui
301 ui = repo.ui
301 self.ui = ui
302 self.ui = ui
302 baseurl, authinfo = url.authinfo()
303 baseurl, authinfo = url.authinfo()
303 self.baseurl = baseurl.rstrip(b'/')
304 self.baseurl = baseurl.rstrip(b'/')
304 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
305 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
305 if not useragent:
306 if not useragent:
306 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
307 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
307 self.urlopener = urlmod.opener(ui, authinfo, useragent)
308 self.urlopener = urlmod.opener(ui, authinfo, useragent)
308 self.urlopener.add_handler(lfsauthhandler())
309 self.urlopener.add_handler(lfsauthhandler())
309 self.retry = ui.configint(b'lfs', b'retry')
310 self.retry = ui.configint(b'lfs', b'retry')
310
311
311 def writebatch(self, pointers, fromstore):
312 def writebatch(self, pointers, fromstore):
312 """Batch upload from local to remote blobstore."""
313 """Batch upload from local to remote blobstore."""
313 self._batch(_deduplicate(pointers), fromstore, b'upload')
314 self._batch(_deduplicate(pointers), fromstore, b'upload')
314
315
315 def readbatch(self, pointers, tostore):
316 def readbatch(self, pointers, tostore):
316 """Batch download from remote to local blostore."""
317 """Batch download from remote to local blostore."""
317 self._batch(_deduplicate(pointers), tostore, b'download')
318 self._batch(_deduplicate(pointers), tostore, b'download')
318
319
319 def _batchrequest(self, pointers, action):
320 def _batchrequest(self, pointers, action):
320 """Get metadata about objects pointed by pointers for given action
321 """Get metadata about objects pointed by pointers for given action
321
322
322 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
323 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
323 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
324 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
324 """
325 """
325 objects = [
326 objects = [
326 {r'oid': pycompat.strurl(p.oid()), r'size': p.size()}
327 {r'oid': pycompat.strurl(p.oid()), r'size': p.size()}
327 for p in pointers
328 for p in pointers
328 ]
329 ]
329 requestdata = pycompat.bytesurl(
330 requestdata = pycompat.bytesurl(
330 json.dumps(
331 json.dumps(
331 {r'objects': objects, r'operation': pycompat.strurl(action),}
332 {r'objects': objects, r'operation': pycompat.strurl(action),}
332 )
333 )
333 )
334 )
334 url = b'%s/objects/batch' % self.baseurl
335 url = b'%s/objects/batch' % self.baseurl
335 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
336 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
336 batchreq.add_header(r'Accept', r'application/vnd.git-lfs+json')
337 batchreq.add_header(r'Accept', r'application/vnd.git-lfs+json')
337 batchreq.add_header(r'Content-Type', r'application/vnd.git-lfs+json')
338 batchreq.add_header(r'Content-Type', r'application/vnd.git-lfs+json')
338 try:
339 try:
339 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
340 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
340 rawjson = rsp.read()
341 rawjson = rsp.read()
341 except util.urlerr.httperror as ex:
342 except util.urlerr.httperror as ex:
342 hints = {
343 hints = {
343 400: _(
344 400: _(
344 b'check that lfs serving is enabled on %s and "%s" is '
345 b'check that lfs serving is enabled on %s and "%s" is '
345 b'supported'
346 b'supported'
346 )
347 )
347 % (self.baseurl, action),
348 % (self.baseurl, action),
348 404: _(b'the "lfs.url" config may be used to override %s')
349 404: _(b'the "lfs.url" config may be used to override %s')
349 % self.baseurl,
350 % self.baseurl,
350 }
351 }
351 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
352 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
352 raise LfsRemoteError(
353 raise LfsRemoteError(
353 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
354 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
354 hint=hint,
355 hint=hint,
355 )
356 )
356 except util.urlerr.urlerror as ex:
357 except util.urlerr.urlerror as ex:
357 hint = (
358 hint = (
358 _(b'the "lfs.url" config may be used to override %s')
359 _(b'the "lfs.url" config may be used to override %s')
359 % self.baseurl
360 % self.baseurl
360 )
361 )
361 raise LfsRemoteError(
362 raise LfsRemoteError(
362 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
363 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
363 )
364 )
364 try:
365 try:
365 response = json.loads(rawjson)
366 response = json.loads(rawjson)
366 except ValueError:
367 except ValueError:
367 raise LfsRemoteError(
368 raise LfsRemoteError(
368 _(b'LFS server returns invalid JSON: %s')
369 _(b'LFS server returns invalid JSON: %s')
369 % rawjson
370 % rawjson
370 )
371 )
371
372
372 if self.ui.debugflag:
373 if self.ui.debugflag:
373 self.ui.debug(b'Status: %d\n' % rsp.status)
374 self.ui.debug(b'Status: %d\n' % rsp.status)
374 # lfs-test-server and hg serve return headers in different order
375 # lfs-test-server and hg serve return headers in different order
375 headers = pycompat.bytestr(rsp.info()).strip()
376 headers = pycompat.bytestr(rsp.info()).strip()
376 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
377 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
377
378
378 if r'objects' in response:
379 if r'objects' in response:
379 response[r'objects'] = sorted(
380 response[r'objects'] = sorted(
380 response[r'objects'], key=lambda p: p[r'oid']
381 response[r'objects'], key=lambda p: p[r'oid']
381 )
382 )
382 self.ui.debug(
383 self.ui.debug(
383 b'%s\n'
384 b'%s\n'
384 % pycompat.bytesurl(
385 % pycompat.bytesurl(
385 json.dumps(
386 json.dumps(
386 response,
387 response,
387 indent=2,
388 indent=2,
388 separators=(r'', r': '),
389 separators=(r'', r': '),
389 sort_keys=True,
390 sort_keys=True,
390 )
391 )
391 )
392 )
392 )
393 )
393
394
394 def encodestr(x):
395 def encodestr(x):
395 if isinstance(x, pycompat.unicode):
396 if isinstance(x, pycompat.unicode):
396 return x.encode(u'utf-8')
397 return x.encode(u'utf-8')
397 return x
398 return x
398
399
399 return pycompat.rapply(encodestr, response)
400 return pycompat.rapply(encodestr, response)
400
401
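For reference, here is the shape of a batch exchange that _batchrequest builds and parses, following the git-lfs batch API linked in the docstring; every concrete value here (oid, href, header) is invented:

    import json

    oid = '31' + '0' * 62  # invented 64-hex-digit oid

    request = {
        'operation': 'download',
        'objects': [{'oid': oid, 'size': 12}],
    }

    # A granting reply carries per-object 'actions'; an object the client
    # should not transfer simply lacks the action (see _checkforservererror
    # below).
    response = {
        'objects': [{
            'oid': oid,
            'size': 12,
            'actions': {
                'download': {
                    'href': 'https://lfs.example.com/objects/' + oid,
                    'header': {'Authorization': 'Basic aGk6dGhlcmU='},
                },
            },
        }],
    }

    print(json.dumps(request, indent=2, sort_keys=True))
    print(json.dumps(response, indent=2, sort_keys=True))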
401 def _checkforservererror(self, pointers, responses, action):
402 def _checkforservererror(self, pointers, responses, action):
402 """Scans errors from objects
403 """Scans errors from objects
403
404
404 Raises LfsRemoteError if any objects have an error"""
405 Raises LfsRemoteError if any objects have an error"""
405 for response in responses:
406 for response in responses:
406 # The server should return 404 when objects cannot be found. Some
407 # The server should return 404 when objects cannot be found. Some
407 # server implementations (e.g. lfs-test-server) do not set "error"
408 # server implementations (e.g. lfs-test-server) do not set "error"
408 # but just remove "download" from "actions". Treat that case the
409 # but just remove "download" from "actions". Treat that case the
409 # same as a 404 error.
410 # same as a 404 error.
410 if b'error' not in response:
411 if b'error' not in response:
411 if action == b'download' and action not in response.get(
412 if action == b'download' and action not in response.get(
412 b'actions', []
413 b'actions', []
413 ):
414 ):
414 code = 404
415 code = 404
415 else:
416 else:
416 continue
417 continue
417 else:
418 else:
418 # An error dict without a code doesn't make much sense, so
419 # An error dict without a code doesn't make much sense, so
419 # treat as a server error.
420 # treat as a server error.
420 code = response.get(b'error').get(b'code', 500)
421 code = response.get(b'error').get(b'code', 500)
421
422
422 ptrmap = {p.oid(): p for p in pointers}
423 ptrmap = {p.oid(): p for p in pointers}
423 p = ptrmap.get(response[b'oid'], None)
424 p = ptrmap.get(response[b'oid'], None)
424 if p:
425 if p:
425 filename = getattr(p, 'filename', b'unknown')
426 filename = getattr(p, 'filename', b'unknown')
426 errors = {
427 errors = {
427 404: b'The object does not exist',
428 404: b'The object does not exist',
428 410: b'The object was removed by the owner',
429 410: b'The object was removed by the owner',
429 422: b'Validation error',
430 422: b'Validation error',
430 500: b'Internal server error',
431 500: b'Internal server error',
431 }
432 }
432 msg = errors.get(code, b'status code %d' % code)
433 msg = errors.get(code, b'status code %d' % code)
433 raise LfsRemoteError(
434 raise LfsRemoteError(
434 _(b'LFS server error for "%s": %s') % (filename, msg)
435 _(b'LFS server error for "%s": %s') % (filename, msg)
435 )
436 )
436 else:
437 else:
437 raise LfsRemoteError(
438 raise LfsRemoteError(
438 _(b'LFS server error. Unsolicited response for oid %s')
439 _(b'LFS server error. Unsolicited response for oid %s')
439 % response[b'oid']
440 % response[b'oid']
440 )
441 )
441
442
442 def _extractobjects(self, response, pointers, action):
443 def _extractobjects(self, response, pointers, action):
443 """extract objects from response of the batch API
444 """extract objects from response of the batch API
444
445
445 response: parsed JSON object returned by batch API
446 response: parsed JSON object returned by batch API
446 return response['objects'] filtered by action
447 return response['objects'] filtered by action
447 raise if any object has an error
448 raise if any object has an error
448 """
449 """
449 # Scan errors from objects - fail early
450 # Scan errors from objects - fail early
450 objects = response.get(b'objects', [])
451 objects = response.get(b'objects', [])
451 self._checkforservererror(pointers, objects, action)
452 self._checkforservererror(pointers, objects, action)
452
453
453 # Filter objects with given action. Practically, this skips uploading
454 # Filter objects with given action. Practically, this skips uploading
454 # objects which already exist on the server.
455 # objects which already exist on the server.
455 filteredobjects = [
456 filteredobjects = [
456 o for o in objects if action in o.get(b'actions', [])
457 o for o in objects if action in o.get(b'actions', [])
457 ]
458 ]
458
459
459 return filteredobjects
460 return filteredobjects
460
461
461 def _basictransfer(self, obj, action, localstore):
462 def _basictransfer(self, obj, action, localstore):
462 """Download or upload a single object using basic transfer protocol
463 """Download or upload a single object using basic transfer protocol
463
464
464 obj: dict, an object description returned by batch API
465 obj: dict, an object description returned by batch API
465 action: string, one of ['upload', 'download']
466 action: string, one of ['upload', 'download']
466 localstore: blobstore.local
467 localstore: blobstore.local
467
468
468 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
469 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
469 basic-transfers.md
470 basic-transfers.md
470 """
471 """
471 oid = obj[b'oid']
472 oid = obj[b'oid']
472 href = obj[b'actions'][action].get(b'href')
473 href = obj[b'actions'][action].get(b'href')
473 headers = obj[b'actions'][action].get(b'header', {}).items()
474 headers = obj[b'actions'][action].get(b'header', {}).items()
474
475
475 request = util.urlreq.request(pycompat.strurl(href))
476 request = util.urlreq.request(pycompat.strurl(href))
476 if action == b'upload':
477 if action == b'upload':
477 # If uploading blobs, read data from local blobstore.
478 # If uploading blobs, read data from local blobstore.
478 if not localstore.verify(oid):
479 if not localstore.verify(oid):
479 raise error.Abort(
480 raise error.Abort(
480 _(b'detected corrupt lfs object: %s') % oid,
481 _(b'detected corrupt lfs object: %s') % oid,
481 hint=_(b'run hg verify'),
482 hint=_(b'run hg verify'),
482 )
483 )
483 request.data = filewithprogress(localstore.open(oid), None)
484 request.data = filewithprogress(localstore.open(oid), None)
484 request.get_method = lambda: r'PUT'
485 request.get_method = lambda: r'PUT'
485 request.add_header(r'Content-Type', r'application/octet-stream')
486 request.add_header(r'Content-Type', r'application/octet-stream')
486 request.add_header(r'Content-Length', len(request.data))
487 request.add_header(r'Content-Length', len(request.data))
487
488
488 for k, v in headers:
489 for k, v in headers:
489 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
490 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
490
491
491 response = b''
492 response = b''
492 try:
493 try:
493 with contextlib.closing(self.urlopener.open(request)) as req:
494 with contextlib.closing(self.urlopener.open(request)) as req:
494 ui = self.ui # Shorten debug lines
495 ui = self.ui # Shorten debug lines
495 if self.ui.debugflag:
496 if self.ui.debugflag:
496 ui.debug(b'Status: %d\n' % req.status)
497 ui.debug(b'Status: %d\n' % req.status)
497 # lfs-test-server and hg serve return headers in different
498 # lfs-test-server and hg serve return headers in different
498 # order
499 # order
499 headers = pycompat.bytestr(req.info()).strip()
500 headers = pycompat.bytestr(req.info()).strip()
500 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
501 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
501
502
502 if action == b'download':
503 if action == b'download':
503 # If downloading blobs, store downloaded data to local
504 # If downloading blobs, store downloaded data to local
504 # blobstore
505 # blobstore
505 localstore.download(oid, req)
506 localstore.download(oid, req)
506 else:
507 else:
507 while True:
508 while True:
508 data = req.read(1048576)
509 data = req.read(1048576)
509 if not data:
510 if not data:
510 break
511 break
511 response += data
512 response += data
512 if response:
513 if response:
513 ui.debug(b'lfs %s response: %s' % (action, response))
514 ui.debug(b'lfs %s response: %s' % (action, response))
514 except util.urlerr.httperror as ex:
515 except util.urlerr.httperror as ex:
515 if self.ui.debugflag:
516 if self.ui.debugflag:
516 self.ui.debug(
517 self.ui.debug(
517 b'%s: %s\n' % (oid, ex.read())
518 b'%s: %s\n' % (oid, ex.read())
518 ) # XXX: also bytes?
519 ) # XXX: also bytes?
519 raise LfsRemoteError(
520 raise LfsRemoteError(
520 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
521 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
521 % (stringutil.forcebytestr(ex), oid, action)
522 % (stringutil.forcebytestr(ex), oid, action)
522 )
523 )
523 except util.urlerr.urlerror as ex:
524 except util.urlerr.urlerror as ex:
524 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
525 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
525 util.urllibcompat.getfullurl(request)
526 util.urllibcompat.getfullurl(request)
526 )
527 )
527 raise LfsRemoteError(
528 raise LfsRemoteError(
528 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
529 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
529 )
530 )
530
531
531 def _batch(self, pointers, localstore, action):
532 def _batch(self, pointers, localstore, action):
532 if action not in [b'upload', b'download']:
533 if action not in [b'upload', b'download']:
533 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
534 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
534
535
535 response = self._batchrequest(pointers, action)
536 response = self._batchrequest(pointers, action)
536 objects = self._extractobjects(response, pointers, action)
537 objects = self._extractobjects(response, pointers, action)
537 total = sum(x.get(b'size', 0) for x in objects)
538 total = sum(x.get(b'size', 0) for x in objects)
538 sizes = {}
539 sizes = {}
539 for obj in objects:
540 for obj in objects:
540 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
541 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
541 topic = {
542 topic = {
542 b'upload': _(b'lfs uploading'),
543 b'upload': _(b'lfs uploading'),
543 b'download': _(b'lfs downloading'),
544 b'download': _(b'lfs downloading'),
544 }[action]
545 }[action]
545 if len(objects) > 1:
546 if len(objects) > 1:
546 self.ui.note(
547 self.ui.note(
547 _(b'lfs: need to transfer %d objects (%s)\n')
548 _(b'lfs: need to transfer %d objects (%s)\n')
548 % (len(objects), util.bytecount(total))
549 % (len(objects), util.bytecount(total))
549 )
550 )
550
551
551 def transfer(chunk):
552 def transfer(chunk):
552 for obj in chunk:
553 for obj in chunk:
553 objsize = obj.get(b'size', 0)
554 objsize = obj.get(b'size', 0)
554 if self.ui.verbose:
555 if self.ui.verbose:
555 if action == b'download':
556 if action == b'download':
556 msg = _(b'lfs: downloading %s (%s)\n')
557 msg = _(b'lfs: downloading %s (%s)\n')
557 elif action == b'upload':
558 elif action == b'upload':
558 msg = _(b'lfs: uploading %s (%s)\n')
559 msg = _(b'lfs: uploading %s (%s)\n')
559 self.ui.note(
560 self.ui.note(
560 msg % (obj.get(b'oid'), util.bytecount(objsize))
561 msg % (obj.get(b'oid'), util.bytecount(objsize))
561 )
562 )
562 retry = self.retry
563 retry = self.retry
563 while True:
564 while True:
564 try:
565 try:
565 self._basictransfer(obj, action, localstore)
566 self._basictransfer(obj, action, localstore)
566 yield 1, obj.get(b'oid')
567 yield 1, obj.get(b'oid')
567 break
568 break
568 except socket.error as ex:
569 except socket.error as ex:
569 if retry > 0:
570 if retry > 0:
570 self.ui.note(
571 self.ui.note(
571 _(b'lfs: failed: %r (remaining retry %d)\n')
572 _(b'lfs: failed: %r (remaining retry %d)\n')
572 % (stringutil.forcebytestr(ex), retry)
573 % (stringutil.forcebytestr(ex), retry)
573 )
574 )
574 retry -= 1
575 retry -= 1
575 continue
576 continue
576 raise
577 raise
577
578
578 # Until https multiplexing gets sorted out
579 # Until https multiplexing gets sorted out
579 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
580 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
580 oids = worker.worker(
581 oids = worker.worker(
581 self.ui,
582 self.ui,
582 0.1,
583 0.1,
583 transfer,
584 transfer,
584 (),
585 (),
585 sorted(objects, key=lambda o: o.get(b'oid')),
586 sorted(objects, key=lambda o: o.get(b'oid')),
586 )
587 )
587 else:
588 else:
588 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
589 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
589
590
590 with self.ui.makeprogress(topic, total=total) as progress:
591 with self.ui.makeprogress(topic, total=total) as progress:
591 progress.update(0)
592 progress.update(0)
592 processed = 0
593 processed = 0
593 blobs = 0
594 blobs = 0
594 for _one, oid in oids:
595 for _one, oid in oids:
595 processed += sizes[oid]
596 processed += sizes[oid]
596 blobs += 1
597 blobs += 1
597 progress.update(processed)
598 progress.update(processed)
598 self.ui.note(_(b'lfs: processed: %s\n') % oid)
599 self.ui.note(_(b'lfs: processed: %s\n') % oid)
599
600
600 if blobs > 0:
601 if blobs > 0:
601 if action == b'upload':
602 if action == b'upload':
602 self.ui.status(
603 self.ui.status(
603 _(b'lfs: uploaded %d files (%s)\n')
604 _(b'lfs: uploaded %d files (%s)\n')
604 % (blobs, util.bytecount(processed))
605 % (blobs, util.bytecount(processed))
605 )
606 )
606 elif action == b'download':
607 elif action == b'download':
607 self.ui.status(
608 self.ui.status(
608 _(b'lfs: downloaded %d files (%s)\n')
609 _(b'lfs: downloaded %d files (%s)\n')
609 % (blobs, util.bytecount(processed))
610 % (blobs, util.bytecount(processed))
610 )
611 )
611
612
612 def __del__(self):
613 def __del__(self):
613 # copied from mercurial/httppeer.py
614 # copied from mercurial/httppeer.py
614 urlopener = getattr(self, 'urlopener', None)
615 urlopener = getattr(self, 'urlopener', None)
615 if urlopener:
616 if urlopener:
616 for h in urlopener.handlers:
617 for h in urlopener.handlers:
617 h.close()
618 h.close()
618 getattr(h, "close_all", lambda: None)()
619 getattr(h, "close_all", lambda: None)()
619
620
620
621
621 class _dummyremote(object):
622 class _dummyremote(object):
622 """Dummy store storing blobs to temp directory."""
623 """Dummy store storing blobs to temp directory."""
623
624
624 def __init__(self, repo, url):
625 def __init__(self, repo, url):
625 fullpath = repo.vfs.join(b'lfs', url.path)
626 fullpath = repo.vfs.join(b'lfs', url.path)
626 self.vfs = lfsvfs(fullpath)
627 self.vfs = lfsvfs(fullpath)
627
628
628 def writebatch(self, pointers, fromstore):
629 def writebatch(self, pointers, fromstore):
629 for p in _deduplicate(pointers):
630 for p in _deduplicate(pointers):
630 content = fromstore.read(p.oid(), verify=True)
631 content = fromstore.read(p.oid(), verify=True)
631 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
632 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
632 fp.write(content)
633 fp.write(content)
633
634
634 def readbatch(self, pointers, tostore):
635 def readbatch(self, pointers, tostore):
635 for p in _deduplicate(pointers):
636 for p in _deduplicate(pointers):
636 with self.vfs(p.oid(), b'rb') as fp:
637 with self.vfs(p.oid(), b'rb') as fp:
637 tostore.download(p.oid(), fp)
638 tostore.download(p.oid(), fp)
638
639
639
640
640 class _nullremote(object):
641 class _nullremote(object):
641 """Null store storing blobs to /dev/null."""
642 """Null store storing blobs to /dev/null."""
642
643
643 def __init__(self, repo, url):
644 def __init__(self, repo, url):
644 pass
645 pass
645
646
646 def writebatch(self, pointers, fromstore):
647 def writebatch(self, pointers, fromstore):
647 pass
648 pass
648
649
649 def readbatch(self, pointers, tostore):
650 def readbatch(self, pointers, tostore):
650 pass
651 pass
651
652
652
653
653 class _promptremote(object):
654 class _promptremote(object):
654 """Prompt user to set lfs.url when accessed."""
655 """Prompt user to set lfs.url when accessed."""
655
656
656 def __init__(self, repo, url):
657 def __init__(self, repo, url):
657 pass
658 pass
658
659
659 def writebatch(self, pointers, fromstore, ui=None):
660 def writebatch(self, pointers, fromstore, ui=None):
660 self._prompt()
661 self._prompt()
661
662
662 def readbatch(self, pointers, tostore, ui=None):
663 def readbatch(self, pointers, tostore, ui=None):
663 self._prompt()
664 self._prompt()
664
665
665 def _prompt(self):
666 def _prompt(self):
666 raise error.Abort(_(b'lfs.url needs to be configured'))
667 raise error.Abort(_(b'lfs.url needs to be configured'))
667
668
668
669
669 _storemap = {
670 _storemap = {
670 b'https': _gitlfsremote,
671 b'https': _gitlfsremote,
671 b'http': _gitlfsremote,
672 b'http': _gitlfsremote,
672 b'file': _dummyremote,
673 b'file': _dummyremote,
673 b'null': _nullremote,
674 b'null': _nullremote,
674 None: _promptremote,
675 None: _promptremote,
675 }
676 }
676
677
677
678
678 def _deduplicate(pointers):
679 def _deduplicate(pointers):
679 """Remove any duplicate oids that exist in the list"""
680 """Remove any duplicate oids that exist in the list"""
680 reduced = util.sortdict()
681 reduced = util.sortdict()
681 for p in pointers:
682 for p in pointers:
682 reduced[p.oid()] = p
683 reduced[p.oid()] = p
683 return reduced.values()
684 return reduced.values()
684
685
685
686
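_deduplicate keeps the last pointer seen for each oid while preserving the order in which oids first appeared, which is what util.sortdict provides. The same behavior with a plain dict (insertion-ordered on Python 3.7+); the Pointer class is a hypothetical stand-in:

    class Pointer(object):  # stand-in for lfs pointer objects
        def __init__(self, oid):
            self._oid = oid

        def oid(self):
            return self._oid

    def deduplicate(pointers):
        reduced = {}  # insertion-ordered, like util.sortdict
        for p in pointers:
            reduced[p.oid()] = p
        return list(reduced.values())

    ptrs = [Pointer(b'aa'), Pointer(b'bb'), Pointer(b'aa')]
    assert [p.oid() for p in deduplicate(ptrs)] == [b'aa', b'bb']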
686 def _verify(oid, content):
687 def _verify(oid, content):
687 realoid = node.hex(hashlib.sha256(content).digest())
688 realoid = node.hex(hashlib.sha256(content).digest())
688 if realoid != oid:
689 if realoid != oid:
689 raise LfsCorruptionError(
690 raise LfsCorruptionError(
690 _(b'detected corrupt lfs object: %s') % oid,
691 _(b'detected corrupt lfs object: %s') % oid,
691 hint=_(b'run hg verify'),
692 hint=_(b'run hg verify'),
692 )
693 )
693
694
694
695
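The invariant _verify enforces is simply that the oid names the SHA-256 of the content. In isolation:

    import hashlib

    def checkoid(oid, content):
        # True when the blob content hashes to its advertised oid.
        return hashlib.sha256(content).hexdigest().encode('ascii') == oid

    blob = b'hello, lfs'
    oid = hashlib.sha256(blob).hexdigest().encode('ascii')
    assert checkoid(oid, blob)
    assert not checkoid(oid, blob + b'!')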
695 def remote(repo, remote=None):
696 def remote(repo, remote=None):
696 """remotestore factory. return a store in _storemap depending on config
697 """remotestore factory. return a store in _storemap depending on config
697
698
698 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
699 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
699 infer the endpoint, based on the remote repository using the same path
700 infer the endpoint, based on the remote repository using the same path
700 adjustments as git. As an extension, 'http' is supported as well so that
701 adjustments as git. As an extension, 'http' is supported as well so that
701 ``hg serve`` works out of the box.
702 ``hg serve`` works out of the box.
702
703
703 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
704 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
704 """
705 """
705 lfsurl = repo.ui.config(b'lfs', b'url')
706 lfsurl = repo.ui.config(b'lfs', b'url')
706 url = util.url(lfsurl or b'')
707 url = util.url(lfsurl or b'')
707 if lfsurl is None:
708 if lfsurl is None:
708 if remote:
709 if remote:
709 path = remote
710 path = remote
710 elif util.safehasattr(repo, b'_subtoppath'):
711 elif util.safehasattr(repo, b'_subtoppath'):
711 # The pull command sets this during the optional update phase, which
712 # The pull command sets this during the optional update phase, which
712 # tells exactly where the pull originated, whether 'paths.default'
713 # tells exactly where the pull originated, whether 'paths.default'
713 # or explicit.
714 # or explicit.
714 path = repo._subtoppath
715 path = repo._subtoppath
715 else:
716 else:
716 # TODO: investigate 'paths.remote:lfsurl' style path customization,
717 # TODO: investigate 'paths.remote:lfsurl' style path customization,
717 # and fall back to inferring from 'paths.remote' if unspecified.
718 # and fall back to inferring from 'paths.remote' if unspecified.
718 path = repo.ui.config(b'paths', b'default') or b''
719 path = repo.ui.config(b'paths', b'default') or b''
719
720
720 defaulturl = util.url(path)
721 defaulturl = util.url(path)
721
722
722 # TODO: support local paths as well.
723 # TODO: support local paths as well.
723 # TODO: consider the ssh -> https transformation that git applies
724 # TODO: consider the ssh -> https transformation that git applies
724 if defaulturl.scheme in (b'http', b'https'):
725 if defaulturl.scheme in (b'http', b'https'):
725 if defaulturl.path and defaulturl.path[-1:] != b'/':
726 if defaulturl.path and defaulturl.path[-1:] != b'/':
726 defaulturl.path += b'/'
727 defaulturl.path += b'/'
727 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
728 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
728
729
729 url = util.url(bytes(defaulturl))
730 url = util.url(bytes(defaulturl))
730 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
731 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
731
732
732 scheme = url.scheme
733 scheme = url.scheme
733 if scheme not in _storemap:
734 if scheme not in _storemap:
734 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
735 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
735 return _storemap[scheme](repo, url)
736 return _storemap[scheme](repo, url)
736
737
737
738
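The inference step above mirrors git's server discovery: take the remote repository URL, ensure a trailing slash, and append '.git/info/lfs'. A sketch of just that string manipulation, with an example URL:

    def inferlfsurl(repourl):
        # Mirrors the http/https branch of remote() above.
        if repourl and not repourl.endswith(b'/'):
            repourl += b'/'
        return repourl + b'.git/info/lfs'

    assert (inferlfsurl(b'https://example.com/repo')
            == b'https://example.com/repo/.git/info/lfs')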
738 class LfsRemoteError(error.StorageError):
739 class LfsRemoteError(error.StorageError):
739 pass
740 pass
740
741
741
742
742 class LfsCorruptionError(error.Abort):
743 class LfsCorruptionError(error.Abort):
743 """Raised when a corrupt blob is detected, aborting an operation
744 """Raised when a corrupt blob is detected, aborting an operation
744
745
745 It exists to allow specialized handling on the server side."""
746 It exists to allow specialized handling on the server side."""
@@ -1,523 +1,526
1 # wrapper.py - methods wrapping core mercurial logic
1 # wrapper.py - methods wrapping core mercurial logic
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial.node import bin, hex, nullid, short
13 from mercurial.node import bin, hex, nullid, short
14 from mercurial.pycompat import setattr
14 from mercurial.pycompat import (
15 getattr,
16 setattr,
17 )
15
18
16 from mercurial import (
19 from mercurial import (
17 bundle2,
20 bundle2,
18 changegroup,
21 changegroup,
19 cmdutil,
22 cmdutil,
20 context,
23 context,
21 error,
24 error,
22 exchange,
25 exchange,
23 exthelper,
26 exthelper,
24 localrepo,
27 localrepo,
25 revlog,
28 revlog,
26 scmutil,
29 scmutil,
27 upgrade,
30 upgrade,
28 util,
31 util,
29 vfs as vfsmod,
32 vfs as vfsmod,
30 wireprotov1server,
33 wireprotov1server,
31 )
34 )
32
35
33 from mercurial.interfaces import repository
36 from mercurial.interfaces import repository
34
37
35 from mercurial.utils import (
38 from mercurial.utils import (
36 storageutil,
39 storageutil,
37 stringutil,
40 stringutil,
38 )
41 )
39
42
40 from ..largefiles import lfutil
43 from ..largefiles import lfutil
41
44
42 from . import (
45 from . import (
43 blobstore,
46 blobstore,
44 pointer,
47 pointer,
45 )
48 )
46
49
47 eh = exthelper.exthelper()
50 eh = exthelper.exthelper()
48
51
49
52
50 @eh.wrapfunction(localrepo, b'makefilestorage')
53 @eh.wrapfunction(localrepo, b'makefilestorage')
51 def localrepomakefilestorage(orig, requirements, features, **kwargs):
54 def localrepomakefilestorage(orig, requirements, features, **kwargs):
52 if b'lfs' in requirements:
55 if b'lfs' in requirements:
53 features.add(repository.REPO_FEATURE_LFS)
56 features.add(repository.REPO_FEATURE_LFS)
54
57
55 return orig(requirements=requirements, features=features, **kwargs)
58 return orig(requirements=requirements, features=features, **kwargs)
56
59
57
60
58 @eh.wrapfunction(changegroup, b'allsupportedversions')
61 @eh.wrapfunction(changegroup, b'allsupportedversions')
59 def allsupportedversions(orig, ui):
62 def allsupportedversions(orig, ui):
60 versions = orig(ui)
63 versions = orig(ui)
61 versions.add(b'03')
64 versions.add(b'03')
62 return versions
65 return versions
63
66
64
67
65 @eh.wrapfunction(wireprotov1server, b'_capabilities')
68 @eh.wrapfunction(wireprotov1server, b'_capabilities')
66 def _capabilities(orig, repo, proto):
69 def _capabilities(orig, repo, proto):
67 '''Wrap server command to announce lfs server capability'''
70 '''Wrap server command to announce lfs server capability'''
68 caps = orig(repo, proto)
71 caps = orig(repo, proto)
69 if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
72 if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
70 # Advertise a slightly different capability when lfs is *required*, so
73 # Advertise a slightly different capability when lfs is *required*, so
71 # that the client knows it MUST load the extension. If lfs is not
74 # that the client knows it MUST load the extension. If lfs is not
72 # required on the server, there's no reason to autoload the extension
75 # required on the server, there's no reason to autoload the extension
73 # on the client.
76 # on the client.
74 if b'lfs' in repo.requirements:
77 if b'lfs' in repo.requirements:
75 caps.append(b'lfs-serve')
78 caps.append(b'lfs-serve')
76
79
77 caps.append(b'lfs')
80 caps.append(b'lfs')
78 return caps
81 return caps
79
82
80
83
81 def bypasscheckhash(self, text):
84 def bypasscheckhash(self, text):
82 return False
85 return False
83
86
84
87
85 def readfromstore(self, text):
88 def readfromstore(self, text):
86 """Read filelog content from local blobstore transform for flagprocessor.
89 """Read filelog content from local blobstore transform for flagprocessor.
87
90
88 Default transform for flagprocessor, returning contents from blobstore.
91 Default transform for flagprocessor, returning contents from blobstore.
89 Returns a 2-tuple (text, validatehash) where validatehash is True as the
92 Returns a 2-tuple (text, validatehash) where validatehash is True as the
90 contents of the blobstore should be checked using checkhash.
93 contents of the blobstore should be checked using checkhash.
91 """
94 """
92 p = pointer.deserialize(text)
95 p = pointer.deserialize(text)
93 oid = p.oid()
96 oid = p.oid()
94 store = self.opener.lfslocalblobstore
97 store = self.opener.lfslocalblobstore
95 if not store.has(oid):
98 if not store.has(oid):
96 p.filename = self.filename
99 p.filename = self.filename
97 self.opener.lfsremoteblobstore.readbatch([p], store)
100 self.opener.lfsremoteblobstore.readbatch([p], store)
98
101
99 # The caller will validate the content
102 # The caller will validate the content
100 text = store.read(oid, verify=False)
103 text = store.read(oid, verify=False)
101
104
102 # pack hg filelog metadata
105 # pack hg filelog metadata
103 hgmeta = {}
106 hgmeta = {}
104 for k in p.keys():
107 for k in p.keys():
105 if k.startswith(b'x-hg-'):
108 if k.startswith(b'x-hg-'):
106 name = k[len(b'x-hg-') :]
109 name = k[len(b'x-hg-') :]
107 hgmeta[name] = p[k]
110 hgmeta[name] = p[k]
108 if hgmeta or text.startswith(b'\1\n'):
111 if hgmeta or text.startswith(b'\1\n'):
109 text = storageutil.packmeta(hgmeta, text)
112 text = storageutil.packmeta(hgmeta, text)
110
113
111 return (text, True, {})
114 return (text, True, {})
112
115
113
116
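The 'x-hg-' loop above recovers hg filelog metadata (rename information and the like) that writetostore stashed in the pointer. A self-contained sketch of that repacking, with an invented pointer dict and a simplified packmeta (the real one is storageutil.packmeta):

    def packmeta(meta, text):
        # Simplified stand-in: prefix the text with "\1\nkey: value\n...\1\n".
        lines = b''.join(b'%s: %s\n' % (k, meta[k]) for k in sorted(meta))
        return b'\x01\n' + lines + b'\x01\n' + text

    pointer = {
        b'oid': b'sha256:' + b'0' * 64,   # invented
        b'x-hg-copy': b'src/old.bin',     # hg rename metadata, round-tripped
        b'x-hg-copyrev': b'f' * 40,
    }

    hgmeta = {}
    for k in pointer.keys():
        if k.startswith(b'x-hg-'):
            hgmeta[k[len(b'x-hg-'):]] = pointer[k]

    assert hgmeta == {b'copy': b'src/old.bin', b'copyrev': b'f' * 40}
    assert packmeta(hgmeta, b'blob contents').startswith(b'\x01\n')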
114 def writetostore(self, text, sidedata):
117 def writetostore(self, text, sidedata):
115 # hg filelog metadata (includes rename, etc)
118 # hg filelog metadata (includes rename, etc)
116 hgmeta, offset = storageutil.parsemeta(text)
119 hgmeta, offset = storageutil.parsemeta(text)
117 if offset and offset > 0:
120 if offset and offset > 0:
118 # lfs blob does not contain hg filelog metadata
121 # lfs blob does not contain hg filelog metadata
119 text = text[offset:]
122 text = text[offset:]
120
123
121 # git-lfs only supports sha256
124 # git-lfs only supports sha256
122 oid = hex(hashlib.sha256(text).digest())
125 oid = hex(hashlib.sha256(text).digest())
123 self.opener.lfslocalblobstore.write(oid, text)
126 self.opener.lfslocalblobstore.write(oid, text)
124
127
125 # replace contents with metadata
128 # replace contents with metadata
126 longoid = b'sha256:%s' % oid
129 longoid = b'sha256:%s' % oid
127 metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))
130 metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))
128
131
129 # by default, we expect the content to be binary. however, LFS could also
132 # by default, we expect the content to be binary. however, LFS could also
130 # be used for non-binary content. add a special entry for non-binary data.
133 # be used for non-binary content. add a special entry for non-binary data.
131 # this will be used by filectx.isbinary().
134 # this will be used by filectx.isbinary().
132 if not stringutil.binary(text):
135 if not stringutil.binary(text):
133 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
136 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
134 metadata[b'x-is-binary'] = b'0'
137 metadata[b'x-is-binary'] = b'0'
135
138
136 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
139 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
137 if hgmeta is not None:
140 if hgmeta is not None:
138 for k, v in hgmeta.iteritems():
141 for k, v in hgmeta.iteritems():
139 metadata[b'x-hg-%s' % k] = v
142 metadata[b'x-hg-%s' % k] = v
140
143
141 rawtext = metadata.serialize()
144 rawtext = metadata.serialize()
142 return (rawtext, False)
145 return (rawtext, False)
143
146
144
147
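What writetostore leaves in the filelog is a small git-lfs pointer text rather than the file contents. Roughly what gets stored for a given blob (serialization simplified; the real format comes from pointer.gitlfspointer):

    import hashlib

    text = b'\x00some binary payload\x00'
    oid = hashlib.sha256(text).hexdigest()

    pointerlines = [
        b'version https://git-lfs.github.com/spec/v1',
        b'oid sha256:' + oid.encode('ascii'),
        b'size %d' % len(text),
    ]
    print(b'\n'.join(pointerlines).decode('ascii'))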
145 def _islfs(rlog, node=None, rev=None):
148 def _islfs(rlog, node=None, rev=None):
146 if rev is None:
149 if rev is None:
147 if node is None:
150 if node is None:
148 # both None - likely working copy content where node is not ready
151 # both None - likely working copy content where node is not ready
149 return False
152 return False
150 rev = rlog._revlog.rev(node)
153 rev = rlog._revlog.rev(node)
151 else:
154 else:
152 node = rlog._revlog.node(rev)
155 node = rlog._revlog.node(rev)
153 if node == nullid:
156 if node == nullid:
154 return False
157 return False
155 flags = rlog._revlog.flags(rev)
158 flags = rlog._revlog.flags(rev)
156 return bool(flags & revlog.REVIDX_EXTSTORED)
159 return bool(flags & revlog.REVIDX_EXTSTORED)
157
160
158
161
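_islfs ultimately reduces to one bit test on the revision's flags word. With an illustrative constant standing in for revlog's actual value:

    REVIDX_EXTSTORED = 1 << 13  # illustrative; use revlog's constant in practice

    def islfsflags(flags):
        # A revision is LFS-backed iff the EXTSTORED bit is set.
        return bool(flags & REVIDX_EXTSTORED)

    assert islfsflags(REVIDX_EXTSTORED)
    assert not islfsflags(0)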
159 # Wrapping may also be applied by remotefilelog
162 # Wrapping may also be applied by remotefilelog
160 def filelogaddrevision(
163 def filelogaddrevision(
161 orig,
164 orig,
162 self,
165 self,
163 text,
166 text,
164 transaction,
167 transaction,
165 link,
168 link,
166 p1,
169 p1,
167 p2,
170 p2,
168 cachedelta=None,
171 cachedelta=None,
169 node=None,
172 node=None,
170 flags=revlog.REVIDX_DEFAULT_FLAGS,
173 flags=revlog.REVIDX_DEFAULT_FLAGS,
171 **kwds
174 **kwds
172 ):
175 ):
173 # The matcher isn't available if reposetup() wasn't called.
176 # The matcher isn't available if reposetup() wasn't called.
174 lfstrack = self._revlog.opener.options.get(b'lfstrack')
177 lfstrack = self._revlog.opener.options.get(b'lfstrack')
175
178
176 if lfstrack:
179 if lfstrack:
177 textlen = len(text)
180 textlen = len(text)
178 # exclude hg rename meta from file size
181 # exclude hg rename meta from file size
179 meta, offset = storageutil.parsemeta(text)
182 meta, offset = storageutil.parsemeta(text)
180 if offset:
183 if offset:
181 textlen -= offset
184 textlen -= offset
182
185
183 if lfstrack(self._revlog.filename, textlen):
186 if lfstrack(self._revlog.filename, textlen):
184 flags |= revlog.REVIDX_EXTSTORED
187 flags |= revlog.REVIDX_EXTSTORED
185
188
186 return orig(
189 return orig(
187 self,
190 self,
188 text,
191 text,
189 transaction,
192 transaction,
190 link,
193 link,
191 p1,
194 p1,
192 p2,
195 p2,
193 cachedelta=cachedelta,
196 cachedelta=cachedelta,
194 node=node,
197 node=node,
195 flags=flags,
198 flags=flags,
196 **kwds
199 **kwds
197 )
200 )
198
201
199
202
200 # Wrapping may also be applied by remotefilelog
203 # Wrapping may also be applied by remotefilelog
201 def filelogrenamed(orig, self, node):
204 def filelogrenamed(orig, self, node):
202 if _islfs(self, node):
205 if _islfs(self, node):
203 rawtext = self._revlog.rawdata(node)
206 rawtext = self._revlog.rawdata(node)
204 if not rawtext:
207 if not rawtext:
205 return False
208 return False
206 metadata = pointer.deserialize(rawtext)
209 metadata = pointer.deserialize(rawtext)
207 if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
210 if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
208 return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
211 return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
209 else:
212 else:
210 return False
213 return False
211 return orig(self, node)
214 return orig(self, node)
212
215
213
216
214 # Wrapping may also be applied by remotefilelog
217 # Wrapping may also be applied by remotefilelog
215 def filelogsize(orig, self, rev):
218 def filelogsize(orig, self, rev):
216 if _islfs(self, rev=rev):
219 if _islfs(self, rev=rev):
217 # fast path: use lfs metadata to answer size
220 # fast path: use lfs metadata to answer size
218 rawtext = self._revlog.rawdata(rev)
221 rawtext = self._revlog.rawdata(rev)
219 metadata = pointer.deserialize(rawtext)
222 metadata = pointer.deserialize(rawtext)
220 return int(metadata[b'size'])
223 return int(metadata[b'size'])
221 return orig(self, rev)
224 return orig(self, rev)
222
225
223
226
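The fast path below answers size from the pointer's own 'size' key instead of fetching the blob. Over a parsed pointer dict (standing in for pointer.deserialize's result):

    def lfssize(metadata):
        # Answer the file size without touching the blob itself.
        return int(metadata[b'size'])

    assert lfssize({b'oid': b'sha256:' + b'0' * 64,
                    b'size': b'1048576'}) == 1048576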
224 @eh.wrapfunction(context.basefilectx, b'cmp')
227 @eh.wrapfunction(context.basefilectx, b'cmp')
225 def filectxcmp(orig, self, fctx):
228 def filectxcmp(orig, self, fctx):
226 """returns True if text is different than fctx"""
229 """returns True if text is different than fctx"""
227 # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
230 # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
228 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
231 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
229 # fast path: check LFS oid
232 # fast path: check LFS oid
230 p1 = pointer.deserialize(self.rawdata())
233 p1 = pointer.deserialize(self.rawdata())
231 p2 = pointer.deserialize(fctx.rawdata())
234 p2 = pointer.deserialize(fctx.rawdata())
232 return p1.oid() != p2.oid()
235 return p1.oid() != p2.oid()
233 return orig(self, fctx)
236 return orig(self, fctx)
234
237
235
238
236 @eh.wrapfunction(context.basefilectx, b'isbinary')
239 @eh.wrapfunction(context.basefilectx, b'isbinary')
237 def filectxisbinary(orig, self):
240 def filectxisbinary(orig, self):
238 if self.islfs():
241 if self.islfs():
239 # fast path: use lfs metadata to answer isbinary
242 # fast path: use lfs metadata to answer isbinary
240 metadata = pointer.deserialize(self.rawdata())
243 metadata = pointer.deserialize(self.rawdata())
241 # if lfs metadata says nothing, assume it's binary by default
244 # if lfs metadata says nothing, assume it's binary by default
242 return bool(int(metadata.get(b'x-is-binary', 1)))
245 return bool(int(metadata.get(b'x-is-binary', 1)))
243 return orig(self)
246 return orig(self)
244
247
245
248
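filectxisbinary inverts the convention set up in writetostore: 'x-is-binary' is only written when the content is known to be text, so a missing key defaults to binary. In isolation:

    def isbinary(metadata):
        # Absent key -> assume binary, matching writetostore's default.
        return bool(int(metadata.get(b'x-is-binary', 1)))

    assert isbinary({})                          # no hint: binary by default
    assert not isbinary({b'x-is-binary': b'0'})  # explicitly marked as text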
246 def filectxislfs(self):
249 def filectxislfs(self):
247 return _islfs(self.filelog(), self.filenode())
250 return _islfs(self.filelog(), self.filenode())
248
251
249
252
250 @eh.wrapfunction(cmdutil, b'_updatecatformatter')
253 @eh.wrapfunction(cmdutil, b'_updatecatformatter')
251 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
254 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
252 orig(fm, ctx, matcher, path, decode)
255 orig(fm, ctx, matcher, path, decode)
253 fm.data(rawdata=ctx[path].rawdata())
256 fm.data(rawdata=ctx[path].rawdata())
254
257
255
258
@eh.wrapfunction(scmutil, b'wrapconvertsink')
def convertsink(orig, sink):
    sink = orig(sink)
    if sink.repotype == b'hg':

        class lfssink(sink.__class__):
            def putcommit(
                self,
                files,
                copies,
                parents,
                commit,
                source,
                revmap,
                full,
                cleanp2,
            ):
                pc = super(lfssink, self).putcommit
                node = pc(
                    files,
                    copies,
                    parents,
                    commit,
                    source,
                    revmap,
                    full,
                    cleanp2,
                )

                if b'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add(b'lfs')
                        self.repo._writerequirements()

                return node

        sink.__class__ = lfssink

    return sink


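# The convertsink wrapper above swaps the sink's class for an on-the-fly
# subclass. A generic sketch of that dynamic-subclassing pattern (standalone
# illustration with made-up names, not part of this change):
#
#     class tracingsink(sink.__class__):
#         def putcommit(self, *args, **kwargs):
#             # extra behavior goes here, then defer to the wrapped class
#             return super(tracingsink, self).putcommit(*args, **kwargs)
#
#     sink.__class__ = tracingsink

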
# bundlerepo uses "vfsmod.readonlyvfs(othervfs)", so we need to make sure lfs
# options and blob stores are passed from othervfs to the new readonlyvfs.
@eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith(b'lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))


def _prefetchfiles(repo, revs, match):
    """Ensure that required LFS blobs are present, fetching them as a group if
    needed."""
    if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
        return

    pointers = []
    oids = set()
    localstore = repo.svfs.lfslocalblobstore

    for rev in revs:
        ctx = repo[rev]
        for f in ctx.walk(match):
            p = pointerfromctx(ctx, f)
            if p and p.oid() not in oids and not localstore.has(p.oid()):
                p.filename = f
                pointers.append(p)
                oids.add(p.oid())

    if pointers:
        # Recalculating the repo store here allows 'paths.default' that is set
        # on the repo by a clone command to be used for the update.
        blobstore.remote(repo).readbatch(pointers, localstore)


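# A minimal usage sketch for the prefetch helper above (hypothetical caller,
# not part of this change), assuming `repo` has the lfs extension loaded:
#
#     from mercurial import scmutil
#     m = scmutil.matchall(repo)          # match every file
#     _prefetchfiles(repo, [repo[b'tip'].rev()], m)
#
# Missing blobs for the matched files are then fetched in one batch via
# readbatch() instead of one request per file.

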
def _canskipupload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return True

    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)


def candownload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return False

    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)


def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: this is also used by other extensions, e.g. infinitepush; avoid
    renaming it.
    '''
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)


def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)


@eh.wrapfunction(exchange, b'push')
def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed, and
    update the remote store based on the destination path."""
    if b'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable(b'lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _(b"required features are not supported in the destination: %s")
            raise error.Abort(
                m % b'lfs', hint=_(b'enable the lfs extension on the server')
            )

        # Repositories where this extension is disabled won't have the field.
        # But if there's a requirement, then the extension must be loaded AND
        # there may be blobs to push.
        remotestore = repo.svfs.lfsremoteblobstore
        try:
            repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
            return orig(repo, remote, *args, **kwargs)
        finally:
            repo.svfs.lfsremoteblobstore = remotestore
    else:
        return orig(repo, remote, *args, **kwargs)


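# The push wrapper above uses a save/override/restore idiom so the remote
# blobstore matches the actual push destination even when it differs from
# paths.default. A generic sketch of the idiom (illustrative names only):
#
#     saved = obj.attr
#     try:
#         obj.attr = overridden
#         return operation()
#     finally:
#         obj.attr = saved        # restored even if operation() raises

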
# when writing a bundle via "hg bundle" command, upload related LFS blobs
@eh.wrapfunction(bundle2, b'writenewbundle')
def writenewbundle(
    orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(
        ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
    )


def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug(b'lfs: computing set of blobs to upload\n')
    pointers = {}

    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'lfs search'), _(b'changesets'), len(revs)
    ) as progress:
        for r in revs:
            ctx = repo[r]
            for p in pointersfromctx(ctx).values():
                pointers[p.oid()] = p
            progress.increment()
    return sorted(pointers.values(), key=lambda p: p.oid())


def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish it from a
    non-LFS file, this pointer is represented by an empty dict.
    """
    _ctx = ctx
    if f not in ctx:
        if not removed:
            return None
        if f in ctx.p1():
            _ctx = ctx.p1()
        elif f in ctx.p2():
            _ctx = ctx.p2()
        else:
            return None
    fctx = _ctx[f]
    if not _islfs(fctx.filelog(), fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        if ctx == _ctx:
            return p
        return {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(
            _(b'lfs: corrupted pointer (%s@%s): %s\n')
            % (f, short(_ctx.node()), ex)
        )


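# Usage sketch for pointerfromctx() (hypothetical path, not part of this
# change):
#
#     p = pointerfromctx(ctx, b'big.bin', removed=True)
#     if p is None:
#         pass                 # not an LFS file at all
#     elif p == {}:
#         pass                 # file was deleted in ctx (removed=True only)
#     else:
#         oid = p.oid()        # a real pointer with oid/size metadata

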
def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    result = {}
    m = ctx.repo().narrowmatch()

    # TODO: consider manifest.fastread() instead
    for f in ctx.files():
        if not m(f):
            continue
        p = pointerfromctx(ctx, f, removed=removed)
        if p is not None:
            result[f] = p
    return result


def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)


@eh.wrapfunction(upgrade, b'_finishdatamigration')
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup()
    if util.safehasattr(
        srcrepo.svfs, b'lfslocalblobstore'
    ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
        srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
        dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

        for dirpath, dirs, files in srclfsvfs.walk():
            for oid in files:
                ui.write(_(b'copying lfs blob %s\n') % oid)
                lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))


@eh.wrapfunction(upgrade, b'preservedrequirements')
@eh.wrapfunction(upgrade, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if b'lfs' in repo.requirements:
        reqs.add(b'lfs')
    return reqs
@@ -1,4285 +1,4288
# mq.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''manage a stack of patches

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (a subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use :hg:`help COMMAND` for more details)::

  create new patch                          qnew
  import existing patch                     qimport

  print patch series                        qseries
  print applied patches                     qapplied

  add known patch to applied stack          qpush
  remove patch from applied stack           qpop
  refresh contents of top applied patch     qrefresh

By default, mq will automatically use git patches when required to
avoid losing file mode changes, copy records, binary files or empty
file creations or deletions. This behavior can be configured with::

  [mq]
  git = auto/keep/yes/no

If set to 'keep', mq will obey the [diff] section configuration while
preserving existing git patches upon qrefresh. If set to 'yes' or
'no', mq will override the [diff] section and always generate git or
regular patches, possibly losing data in the second case.

It may be desirable for mq changesets to be kept in the secret phase (see
:hg:`help phases`), which can be enabled with the following setting::

  [mq]
  secret = True

You will by default be managing a patch queue named "patches". You can
create other, independent patch queues with the :hg:`qqueue` command.

If the working directory contains uncommitted files, qpush, qpop and
qgoto abort immediately. If -f/--force is used, the changes are
discarded. Setting::

  [mq]
  keepchanges = True

makes them behave as if --keep-changes were passed, and non-conflicting
local changes will be tolerated and preserved. If incompatible options
such as -f/--force or --exact are passed, this setting is ignored.

This extension used to provide a strip command. This command now lives
in the strip extension.
'''

from __future__ import absolute_import, print_function

import errno
import os
import re
import shutil
from mercurial.i18n import _
from mercurial.node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
-from mercurial.pycompat import open
+from mercurial.pycompat import (
+    getattr,
+    open,
+)
from mercurial import (
    cmdutil,
    commands,
    dirstateguard,
    encoding,
    error,
    extensions,
    hg,
    localrepo,
    lock as lockmod,
    logcmdutil,
    patch as patchmod,
    phases,
    pycompat,
    registrar,
    revsetlang,
    scmutil,
    smartset,
    subrepoutil,
    util,
    vfs as vfsmod,
)
from mercurial.utils import (
    dateutil,
    stringutil,
)

release = lockmod.release
seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))]

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'mq', b'git', default=b'auto',
)
configitem(
    b'mq', b'keepchanges', default=False,
)
configitem(
    b'mq', b'plain', default=False,
)
configitem(
    b'mq', b'secret', default=False,
)

# force load strip extension formerly included in mq and import some utility
try:
    stripext = extensions.find(b'strip')
except KeyError:
    # note: load is lazy so we could avoid the try-except,
    # but I (marmoute) prefer this explicit code.
    class dummyui(object):
        def debug(self, msg):
            pass

        def log(self, event, msgfmt, *msgargs, **opts):
            pass

    stripext = extensions.load(dummyui(), b'strip', b'')

strip = stripext.strip


def checksubstate(repo, baserev=None):
    '''return list of subrepos at a different revision than substate.
    Abort if any subrepos have uncommitted changes.'''
    inclsubs = []
    wctx = repo[None]
    if baserev:
        bctx = repo[baserev]
    else:
        bctx = wctx.p1()
    for s in sorted(wctx.substate):
        wctx.sub(s).bailifchanged(True)
        if s not in bctx.substate or bctx.sub(s).dirty():
            inclsubs.append(s)
    return inclsubs


# Patch names look like unix file names.
# They must be joinable with the queue directory and result in the patch path.
normname = util.normpath


class statusentry(object):
    def __init__(self, node, name):
        self.node, self.name = node, name

    def __bytes__(self):
        return hex(self.node) + b':' + self.name

    __str__ = encoding.strmethod(__bytes__)
    __repr__ = encoding.strmethod(__bytes__)


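# A serialized statusentry, as stored one per line in the status file, is
# "<40-hex-digit node>:<patch name>"; for example (hypothetical node):
#
#     3b16f10b6e6b9d8e4ea0f4096d36dffa91e52612:fix-encoding.patch
#
# parselines() in queue.applied below splits on the first ':' to recover
# the two fields.

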
# The order of the headers in 'hg export' HG patches:
HGHEADERS = [
    # '# HG changeset patch',
    b'# User ',
    b'# Date ',
    b'# ',
    b'# Branch ',
    b'# Node ID ',
    b'# Parent ',  # can occur twice for merges - but that is not relevant for mq
]
# The order of headers in plain 'mail style' patches:
PLAINHEADERS = {
    b'from': 0,
    b'date': 1,
    b'subject': 2,
}


def inserthgheader(lines, header, value):
    """Assuming lines contains an HG patch header, add a header line with value.
    >>> try: inserthgheader([], b'# Date ', b'z')
    ... except ValueError as inst: print("oops")
    oops
    >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '']
    >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
    ['# HG changeset patch', '# User y', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
    ...                b'# User ', b'z')
    ['# HG changeset patch', '# Date x', '# User z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '', '# Date y']
    >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '# Parent y']
    """
    start = lines.index(b'# HG changeset patch') + 1
    newindex = HGHEADERS.index(header)
    bestpos = len(lines)
    for i in range(start, len(lines)):
        line = lines[i]
        if not line.startswith(b'# '):
            bestpos = min(bestpos, i)
            break
        for lineindex, h in enumerate(HGHEADERS):
            if line.startswith(h):
                if lineindex == newindex:
                    lines[i] = header + value
                    return lines
                if lineindex > newindex:
                    bestpos = min(bestpos, i)
                break  # next line
    lines.insert(bestpos, header + value)
    return lines


def insertplainheader(lines, header, value):
    """For lines containing a plain patch header, add a header line with value.
    >>> insertplainheader([], b'Date', b'z')
    ['Date: z']
    >>> insertplainheader([b''], b'Date', b'z')
    ['Date: z', '']
    >>> insertplainheader([b'x'], b'Date', b'z')
    ['Date: z', '', 'x']
    >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
    ['From: y', 'Date: z', '', 'x']
    >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
    [' date : x', 'From: z', '']
    >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
    ['Date: z', '', 'Date: y']
    >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
    ['From: y', 'foo: bar', 'DATE: z', '', 'x']
    """
    newprio = PLAINHEADERS[header.lower()]
    bestpos = len(lines)
    for i, line in enumerate(lines):
        if b':' in line:
            lheader = line.split(b':', 1)[0].strip().lower()
            lprio = PLAINHEADERS.get(lheader, newprio + 1)
            if lprio == newprio:
                lines[i] = b'%s: %s' % (header, value)
                return lines
            if lprio > newprio and i < bestpos:
                bestpos = i
        else:
            if line:
                lines.insert(i, b'')
                if i < bestpos:
                    bestpos = i
            break
    lines.insert(bestpos, b'%s: %s' % (header, value))
    return lines


class patchheader(object):
    def __init__(self, pf, plainmode=False):
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (
                    l.startswith(b"diff -")
                    or l.startswith(b"Index:")
                    or l.startswith(b"===========")
                ):
                    del lines[-1]
                else:
                    break

        def eatempty(lines):
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        branch = None
        nodeid = None
        diffstart = 0

        for line in open(pf, b'rb'):
            line = line.rstrip()
            if line.startswith(b'diff --git') or (
                diffstart and line.startswith(b'+++ ')
            ):
                diffstart = 2
                break
            diffstart = 0  # reset
            if line.startswith(b"--- "):
                diffstart = 1
                continue
            elif format == b"hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith(b"# User "):
                    user = line[7:]
                elif line.startswith(b"# Date "):
                    date = line[7:]
                elif line.startswith(b"# Parent "):
                    parent = line[9:].lstrip()  # handle double trailing space
                elif line.startswith(b"# Branch "):
                    branch = line[9:]
                elif line.startswith(b"# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith(b"# ") and line:
                    message.append(line)
                    format = None
            elif line == b'# HG changeset patch':
                message = []
                format = b"hgpatch"
            elif format != b"tagdone" and (
                line.startswith(b"Subject: ") or line.startswith(b"subject: ")
            ):
                subject = line[9:]
                format = b"tag"
            elif format != b"tagdone" and (
                line.startswith(b"From: ") or line.startswith(b"from: ")
            ):
                user = line[6:]
                format = b"tag"
            elif format != b"tagdone" and (
                line.startswith(b"Date: ") or line.startswith(b"date: ")
            ):
                date = line[6:]
                format = b"tag"
            elif format == b"tag" and line == b"":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = b"tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith(b"tag") and subject:
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = (
            plainmode
            or b'# HG changeset patch' not in self.comments
            and any(
                c.startswith(b'Date: ') or c.startswith(b'From: ')
                for c in self.comments
            )
        )

    def setuser(self, user):
        try:
            inserthgheader(self.comments, b'# User ', user)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, b'From', user)
            else:
                tmp = [b'# HG changeset patch', b'# User ' + user]
                self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        try:
            inserthgheader(self.comments, b'# Date ', date)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, b'Date', date)
            else:
                tmp = [b'# HG changeset patch', b'# Date ' + date]
                self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        try:
            inserthgheader(self.comments, b'# Parent ', parent)
        except ValueError:
            if not self.plainmode:
                tmp = [b'# HG changeset patch', b'# Parent ' + parent]
                self.comments = tmp + self.comments
        self.parent = parent

    def setmessage(self, message):
        if self.comments:
            self._delmsg()
        self.message = [message]
        if message:
            if self.plainmode and self.comments and self.comments[-1]:
                self.comments.append(b'')
            self.comments.append(message)

    def __bytes__(self):
        s = b'\n'.join(self.comments).rstrip()
        if not s:
            return b''
        return s + b'\n\n'

    __str__ = encoding.strmethod(__bytes__)

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comment fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = b'subject: ' + self.message[0].lower()
            for i in pycompat.xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]


def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensuring a commit respects the mq.secret setting

    It should be used instead of repo.commit inside the mq source for
    operations creating new changesets.
    """
    repo = repo.unfiltered()
    if phase is None:
        if repo.ui.configbool(b'mq', b'secret'):
            phase = phases.secret
    overrides = {(b'ui', b'allowemptycommit'): True}
    if phase is not None:
        overrides[(b'phases', b'new-commit')] = phase
    with repo.ui.configoverride(overrides, b'mq'):
        repo.ui.setconfig(b'ui', b'allowemptycommit', True)
        return repo.commit(*args, **kwargs)


class AbortNoCleanup(error.Abort):
    pass


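# A usage sketch of the configoverride pattern newcommit relies on
# (hypothetical call, not part of this change): it scopes a config change to
# a single operation so the change is undone even on error:
#
#     overrides = {(b'phases', b'new-commit'): phases.secret}
#     with repo.ui.configoverride(overrides, b'mq'):
#         repo.commit(text=b'a secret-phase changeset')

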
class queue(object):
    def __init__(self, ui, baseui, path, patchdir=None):
        self.basepath = path
        try:
            with open(os.path.join(path, b'patches.queue'), r'rb') as fh:
                cur = fh.read().rstrip()

            if not cur:
                curpath = os.path.join(path, b'patches')
            else:
                curpath = os.path.join(path, b'patches-' + cur)
        except IOError:
            curpath = os.path.join(path, b'patches')
        self.path = patchdir or curpath
        self.opener = vfsmod.vfs(self.path)
        self.ui = ui
        self.baseui = baseui
        self.applieddirty = False
        self.seriesdirty = False
        self.added = []
        self.seriespath = b"series"
        self.statuspath = b"status"
        self.guardspath = b"guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
        gitmode = ui.config(b'mq', b'git').lower()
        boolmode = stringutil.parsebool(gitmode)
        if boolmode is not None:
            if boolmode:
                gitmode = b'yes'
            else:
                gitmode = b'no'
        self.gitmode = gitmode
        # deprecated config: mq.plain
        self.plainmode = ui.configbool(b'mq', b'plain')
        self.checkapplied = True

    @util.propertycache
    def applied(self):
        def parselines(lines):
            for l in lines:
                entry = l.split(b':', 1)
                if len(entry) > 1:
                    n, name = entry
                    yield statusentry(bin(n), name)
                elif l.strip():
                    self.ui.warn(
                        _(b'malformed mq status line: %s\n')
                        % stringutil.pprint(entry)
                    )
                # else we ignore empty lines

        try:
            lines = self.opener.read(self.statuspath).splitlines()
            return list(parselines(lines))
        except IOError as e:
            if e.errno == errno.ENOENT:
                return []
            raise

    @util.propertycache
    def fullseries(self):
        try:
            return self.opener.read(self.seriespath).splitlines()
        except IOError as e:
            if e.errno == errno.ENOENT:
                return []
            raise

    @util.propertycache
    def series(self):
        self.parseseries()
        return self.series

    @util.propertycache
    def seriesguards(self):
        self.parseseries()
        return self.seriesguards

    def invalidate(self):
        for a in b'applied fullseries series seriesguards'.split():
            if a in self.__dict__:
                delattr(self, a)
        self.applieddirty = False
        self.seriesdirty = False
        self.guardsdirty = False
        self.activeguards = None

    def diffopts(self, opts=None, patchfn=None, plain=False):
        """Return diff options tweaked for this mq use, possibly upgrading to
        git format, and possibly plain and without lossy options."""
        diffopts = patchmod.difffeatureopts(
            self.ui,
            opts,
            git=True,
            whitespace=not plain,
            formatchanging=not plain,
        )
        if self.gitmode == b'auto':
            diffopts.upgrade = True
        elif self.gitmode == b'keep':
            pass
        elif self.gitmode in (b'yes', b'no'):
            diffopts.git = self.gitmode == b'yes'
        else:
            raise error.Abort(
                _(b'mq.git option can be auto/keep/yes/no, got %s')
                % self.gitmode
            )
        if patchfn:
            diffopts = self.patchopts(diffopts, patchfn)
        return diffopts

    def patchopts(self, diffopts, *patches):
        """Return a copy of input diff options with git set to true if
        referenced patch is a git patch and should be preserved as such.
        """
        diffopts = diffopts.copy()
        if not diffopts.git and self.gitmode == b'keep':
            for patchfn in patches:
                patchf = self.opener(patchfn, b'r')
                # if the patch was a git patch, refresh it as a git patch
                diffopts.git = any(
                    line.startswith(b'diff --git') for line in patchf
                )
                patchf.close()
        return diffopts

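    # Behavior sketch for the mq.git modes handled above (summarizing the
    # extension help, not new semantics): 'auto' lets diffs upgrade to git
    # format only when needed, 'keep' re-reads each patch and stays git iff
    # the patch already contains a 'diff --git' header, and 'yes'/'no' force
    # git format on or off regardless of the [diff] section.
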
    def join(self, *p):
        return os.path.join(self.path, *p)

    def findseries(self, patch):
        def matchpatch(l):
            l = l.split(b'#', 1)[0]
            return l.strip() == patch

        for index, l in enumerate(self.fullseries):
            if matchpatch(l):
                return index
        return None

    guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parseseries(self):
        self.series = []
        self.seriesguards = []
        for l in self.fullseries:
            h = l.find(b'#')
            if h == -1:
                patch = l
                comment = b''
            elif h == 0:
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise error.Abort(
                        _(b'%s appears more than once in %s')
                        % (patch, self.join(self.seriespath))
                    )
                self.series.append(patch)
                self.seriesguards.append(self.guard_re.findall(comment))

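    # A series file line, as parsed above, is "<patch name> [#guards]"; for
    # example (hypothetical names):
    #
    #     fix-encoding.patch #+experimental #-stable
    #
    # yields patch 'fix-encoding.patch' with guards ['+experimental',
    # '-stable'], while a line starting with '#' is skipped as a comment.
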
    def checkguard(self, guard):
        if not guard:
            return _(b'guard cannot be an empty string')
        bad_chars = b'# \t\r\n\f'
        first = guard[0]
        if first in b'-+':
            return _(b'guard %r starts with invalid character: %r') % (
                guard,
                first,
            )
        for c in bad_chars:
            if c in guard:
                return _(b'invalid character in guard %r: %r') % (guard, c)

    def setactive(self, guards):
        for guard in guards:
            bad = self.checkguard(guard)
            if bad:
                raise error.Abort(bad)
        guards = sorted(set(guards))
        self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
        self.activeguards = guards
        self.guardsdirty = True

    def active(self):
        if self.activeguards is None:
            self.activeguards = []
            try:
                guards = self.opener.read(self.guardspath).split()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.checkguard(guard)
                if bad:
                    self.ui.warn(
                        b'%s:%d: %s\n'
                        % (self.join(self.guardspath), i + 1, bad)
                    )
                else:
                    self.activeguards.append(guard)
        return self.activeguards

    def setguards(self, idx, guards):
        for g in guards:
            if len(g) < 2:
                raise error.Abort(_(b'guard %r too short') % g)
            if g[0] not in b'-+':
                raise error.Abort(_(b'guard %r starts with invalid char') % g)
            bad = self.checkguard(g[1:])
            if bad:
                raise error.Abort(bad)
        drop = self.guard_re.sub(b'', self.fullseries[idx])
        self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
        self.parseseries()
        self.seriesdirty = True

710 def pushable(self, idx):
713 def pushable(self, idx):
711 if isinstance(idx, bytes):
714 if isinstance(idx, bytes):
712 idx = self.series.index(idx)
715 idx = self.series.index(idx)
713 patchguards = self.seriesguards[idx]
716 patchguards = self.seriesguards[idx]
714 if not patchguards:
717 if not patchguards:
715 return True, None
718 return True, None
716 guards = self.active()
719 guards = self.active()
717 exactneg = [
720 exactneg = [
718 g for g in patchguards if g.startswith(b'-') and g[1:] in guards
721 g for g in patchguards if g.startswith(b'-') and g[1:] in guards
719 ]
722 ]
720 if exactneg:
723 if exactneg:
721 return False, stringutil.pprint(exactneg[0])
724 return False, stringutil.pprint(exactneg[0])
722 pos = [g for g in patchguards if g.startswith(b'+')]
725 pos = [g for g in patchguards if g.startswith(b'+')]
723 exactpos = [g for g in pos if g[1:] in guards]
726 exactpos = [g for g in pos if g[1:] in guards]
724 if pos:
727 if pos:
725 if exactpos:
728 if exactpos:
726 return True, stringutil.pprint(exactpos[0])
729 return True, stringutil.pprint(exactpos[0])
727 return False, b' '.join([stringutil.pprint(p) for p in pos])
730 return False, b' '.join([stringutil.pprint(p) for p in pos])
728 return True, b''
731 return True, b''
729
732
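    # Worked example of the guard resolution above: with active guards
    # {b'stable'}, a patch guarded by b'-stable' is never pushable, a patch
    # guarded only by b'+experimental' is skipped (no positive guard
    # matches), and a patch guarded by b'+stable -win' is pushable because
    # b'+stable' matches and b'-win' does not.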
    def explainpushable(self, idx, all_patches=False):
        if all_patches:
            write = self.ui.write
        else:
            write = self.ui.warn

        if all_patches or self.ui.verbose:
            if isinstance(idx, bytes):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(
                        _(b'allowing %s - no guards in effect\n')
                        % self.series[idx]
                    )
                else:
                    if not why:
                        write(
                            _(b'allowing %s - no matching negative guards\n')
                            % self.series[idx]
                        )
                    else:
                        write(
                            _(b'allowing %s - guarded by %s\n')
                            % (self.series[idx], why)
                        )
            if not pushable:
                if why:
                    write(
                        _(b'skipping %s - guarded by %s\n')
                        % (self.series[idx], why)
                    )
                else:
                    write(
                        _(b'skipping %s - no matching guards\n')
                        % self.series[idx]
                    )

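    # Persistence note: the queue keeps three dirty flags, one per on-disk
    # file -- applieddirty for the status file (applied patches),
    # seriesdirty for the series file (patch order and guards), and
    # guardsdirty for the guards file (active guards).  savedirty() below
    # flushes whichever of them changed.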
    def savedirty(self):
        def writelist(items, path):
            fp = self.opener(path, b'wb')
            for i in items:
                fp.write(b"%s\n" % i)
            fp.close()

        if self.applieddirty:
            writelist(map(bytes, self.applied), self.statuspath)
            self.applieddirty = False
        if self.seriesdirty:
            writelist(self.fullseries, self.seriespath)
            self.seriesdirty = False
        if self.guardsdirty:
            writelist(self.activeguards, self.guardspath)
            self.guardsdirty = False
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []

    def removeundo(self, repo):
        undo = repo.sjoin(b'undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError as inst:
            self.ui.warn(
                _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
            )

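    # scmutil.backuppath() picks the destination for the backups made
    # below -- typically the file's own path with an .orig suffix, or a
    # mirror tree under the ui.origbackuppath location when that option is
    # configured.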
    def backup(self, repo, files, copy=False):
        # backup local changes in --force case
        for f in sorted(files):
            absf = repo.wjoin(f)
            if os.path.lexists(absf):
                absorig = scmutil.backuppath(self.ui, repo, f)
                self.ui.note(
                    _(b'saving current version of %s as %s\n')
                    % (f, os.path.relpath(absorig))
                )

                if copy:
                    util.copyfile(absf, absorig)
                else:
                    util.rename(absf, absorig)

    def printdiff(
        self,
        repo,
        diffopts,
        node1,
        node2=None,
        files=None,
        fp=None,
        changes=None,
        opts=None,
    ):
        if opts is None:
            opts = {}
        stat = opts.get(b'stat')
        m = scmutil.match(repo[node1], files, opts)
        logcmdutil.diffordiffstat(
            self.ui, repo, diffopts, node1, node2, m, changes, stat, fp
        )

    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        # first try just applying the patch
        (err, n) = self.apply(
            repo, [patch], update_status=False, strict=True, merge=rev
        )

        if err == 0:
            return (err, n)

        if n is None:
            raise error.Abort(_(b"apply failed for patch %s") % patch)

        self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        strip(self.ui, repo, [n], update=False, backup=False)

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise error.Abort(_(b"update returned %d") % ret)
        n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
        if n is None:
            raise error.Abort(_(b"repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except Exception:
            raise error.Abort(_(b"unable to read %s") % patch)

        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, b"w")
        comments = bytes(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)

    def qparents(self, repo, rev=None):
        """return the mq handled parent or p1

        In some cases where mq ends up being the parent of a merge, the
        appropriate parent may be p2
        (e.g. an in-progress merge started with mq disabled).

        If no parent is managed by mq, p1 is returned.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            if not self.applied:
                return None
            return self.applied[-1].node
        p1, p2 = repo.changelog.parents(rev)
        if p2 != nullid and p2 in [x.node for x in self.applied]:
            return p2
        return p1

    def mergepatch(self, repo, mergeq, series, diffopts):
        if not self.applied:
            # Each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # So we insert a merge marker with only one parent; this way
            # the first patch in the queue is never a merge patch.
            pname = b".hg.patches.merge.marker"
            n = newcommit(repo, None, b'[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = True

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_(b"patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_(b"patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = True
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)

    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file'''
        files = set()
        try:
            fuzz = patchmod.patch(
                self.ui, repo, patchfile, strip=1, files=files, eolmode=None
            )
            return (True, list(files), fuzz)
        except Exception as inst:
            self.ui.note(stringutil.forcebytestr(inst) + b'\n')
            if not self.ui.verbose:
                self.ui.warn(_(b"patch failed, unable to continue (try -v)\n"))
            self.ui.traceback()
            return (False, list(files), False)

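    # Locking note for apply(): the wlock (working directory) is taken
    # before the store lock, and both before opening the transaction.  On
    # AbortNoCleanup the transaction is committed anyway; on any other
    # failure it is aborted and the queue state invalidated, and the locks
    # are released and the undo file removed in the finally block.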
    def apply(
        self,
        repo,
        series,
        list=False,
        update_status=True,
        strict=False,
        patchdir=None,
        merge=None,
        all_files=None,
        tobackup=None,
        keepchanges=False,
    ):
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction(b"qpush")
            try:
                ret = self._apply(
                    repo,
                    series,
                    list,
                    update_status,
                    strict,
                    patchdir,
                    merge,
                    all_files=all_files,
                    tobackup=tobackup,
                    keepchanges=keepchanges,
                )
                tr.close()
                self.savedirty()
                return ret
            except AbortNoCleanup:
                tr.close()
                self.savedirty()
                raise
            except:  # re-raises
                try:
                    tr.abort()
                finally:
                    self.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)

    def _apply(
        self,
        repo,
        series,
        list=False,
        update_status=True,
        strict=False,
        patchdir=None,
        merge=None,
        all_files=None,
        tobackup=None,
        keepchanges=False,
    ):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_(b"applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_(b"unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = b"imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append(b"\nimported patch %s" % patchname)
                message = b'\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _(b"conflicting local changes found"),
                            hint=_(b"did you forget to qrefresh?"),
                        )
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_(b"patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                with repo.dirstate.parentchange():
                    for f in removed:
                        repo.dirstate.remove(f)
                    for f in merged:
                        repo.dirstate.merge(f)
                    p1 = repo.dirstate.p1()
                    repo.setparents(p1, merge)

            if all_files and b'.hgsubstate' in all_files:
                wctx = repo[None]
                pctx = repo[b'.']
                overwrite = False
                mergedsubstate = subrepoutil.submerge(
                    repo, pctx, wctx, wctx, overwrite
                )
                files += mergedsubstate.keys()

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo.changelog.tip()
            n = newcommit(
                repo, None, message, ph.user, ph.date, match=match, force=True
            )
            if repo.changelog.tip() == oldtip:
                raise error.Abort(
                    _(b"qpush exactly duplicates child changeset")
                )
            if n is None:
                raise error.Abort(_(b"repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(
                    _(b"patch failed, rejects left in working " b"directory\n")
                )
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_(b"fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)

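    # _cleanup() removes patches both from disk (unless keep is set) and
    # from the bookkeeping: numrevs is how many entries at the front of
    # self.applied correspond to revisions being finalized, so those
    # entries are dropped from the status file as well.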
    def _cleanup(self, patches, numrevs, keep=False):
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                try:
                    os.unlink(self.join(p))
                except OSError as inst:
                    if inst.errno != errno.ENOENT:
                        raise

        qfinished = []
        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True

        unknown = []

        sortedseries = []
        for p in patches:
            idx = self.findseries(p)
            if idx is None:
                sortedseries.append((-1, p))
            else:
                sortedseries.append((idx, p))

        sortedseries.sort(reverse=True)
        for (i, p) in sortedseries:
            if i != -1:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                rev = dict((entry.name, entry.node) for entry in qfinished)
                for p in unknown:
                    msg = _(b'revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _(b'unknown patches: %s\n')
                raise error.Abort(b''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]

    def _revpatches(self, repo, revs):
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise error.Abort(_(b'revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                msg = _(b'cannot delete revision %d above applied patches')
                raise error.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in (b'[mq]: %s', b'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _(b'patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches

    def finish(self, repo, revs):
        # Manually trigger phase computation to ensure phasedefaults is
        # executed before we remove the patches.
        repo._phasecache
        patches = self._revpatches(repo, sorted(revs))
        qfinished = self._cleanup(patches, len(patches))
        if qfinished and repo.ui.configbool(b'mq', b'secret'):
            # only use this logic when the secret option is set
            oldqbase = repo[qfinished[0]]
            tphase = phases.newcommitphase(repo.ui)
            if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
                with repo.transaction(b'qfinish') as tr:
                    phases.advanceboundary(repo, tr, tphase, qfinished)

    def delete(self, repo, patches, opts):
        if not patches and not opts.get(b'rev'):
            raise error.Abort(
                _(b'qdelete requires at least one revision or ' b'patch name')
            )

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise error.Abort(_(b"cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise error.Abort(_(b"patch %s not in series file") % patch)
            if patch not in realpatches:
                realpatches.append(patch)

        numrevs = 0
        if opts.get(b'rev'):
            if not self.applied:
                raise error.Abort(_(b'no patches applied'))
            revs = scmutil.revrange(repo, opts.get(b'rev'))
            revs.sort()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get(b'keep'))

    def checktoppatch(self, repo):
        '''check that working directory is at qtip'''
        if self.applied:
            top = self.applied[-1].node
            patch = self.applied[-1].name
            if repo.dirstate.p1() != top:
                raise error.Abort(_(b"working directory revision is not qtip"))
            return top, patch
        return None, None

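    # substatestate is the dirstate entry for .hgsubstate: b'a' (added)
    # and b'?' (unknown) are reported as added, b'r' (removed) as removed,
    # and anything else (normally b'n') as modified.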
    def putsubstate2changes(self, substatestate, changes):
        for files in changes[:3]:
            if b'.hgsubstate' in files:
                return  # already listed
        # not yet listed
        if substatestate in b'a?':
            changes[1].append(b'.hgsubstate')
        elif substatestate in b'r':
            changes[2].append(b'.hgsubstate')
        else:  # modified
            changes[0].append(b'.hgsubstate')

    def checklocalchanges(self, repo, force=False, refresh=True):
        excsuffix = b''
        if refresh:
            excsuffix = b', qrefresh first'
            # plain versions for i18n tool to detect them
            _(b"local changes found, qrefresh first")
            _(b"local changed subrepos found, qrefresh first")

        s = repo.status()
        if not force:
            cmdutil.checkunfinished(repo)
            if s.modified or s.added or s.removed or s.deleted:
                _(b"local changes found")  # i18n tool detection
                raise error.Abort(_(b"local changes found" + excsuffix))
            if checksubstate(repo):
                _(b"local changed subrepos found")  # i18n tool detection
                raise error.Abort(
                    _(b"local changed subrepos found" + excsuffix)
                )
        else:
            cmdutil.checkunfinished(repo, skipmerge=True)
        return s

    _reserved = (b'series', b'status', b'guards', b'.', b'..')

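    # Examples of names rejected below: b'series' and b'status' collide
    # with the queue's own bookkeeping files, b'.hgignore' trips the
    # b'.hg' prefix check, and b'fix:crash' fails because b':' is not
    # allowed in patch names.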
    def checkreservedname(self, name):
        if name in self._reserved:
            raise error.Abort(
                _(b'"%s" cannot be used as the name of a patch') % name
            )
        if name != name.strip():
            # whitespace is stripped by parseseries()
            raise error.Abort(
                _(b'patch name cannot begin or end with ' b'whitespace')
            )
        for prefix in (b'.hg', b'.mq'):
            if name.startswith(prefix):
                raise error.Abort(
                    _(b'patch name cannot begin with "%s"') % prefix
                )
        for c in (b'#', b':', b'\r', b'\n'):
            if c in name:
                raise error.Abort(
                    _(b'%r cannot be used in the name of a patch')
                    % pycompat.bytestr(c)
                )

    def checkpatchname(self, name, force=False):
        self.checkreservedname(name)
        if not force and os.path.exists(self.join(name)):
            if os.path.isdir(self.join(name)):
                raise error.Abort(
                    _(b'"%s" already exists as a directory') % name
                )
            else:
                raise error.Abort(_(b'patch "%s" already exists') % name)

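    # Slug example for makepatchname(): a title like
    # b'Fix crash: handle empty series!' becomes
    # b'fix_crash_handle_empty_series', and if that name is already taken
    # the loop below appends __1, __2, ... until the name is unique.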
    def makepatchname(self, title, fallbackname):
        """Return a suitable filename for title, adding a suffix to make
        it unique in the existing list"""
        namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
        namebase = namebase[:75]  # avoid too long name (issue5117)
        if namebase:
            try:
                self.checkreservedname(namebase)
            except error.Abort:
                namebase = fallbackname
        else:
            namebase = fallbackname
        name = namebase
        i = 0
        while True:
            if name not in self.fullseries:
                try:
                    self.checkpatchname(name)
                    break
                except error.Abort:
                    pass
            i += 1
            name = b'%s__%d' % (namebase, i)
        return name

    def checkkeepchanges(self, keepchanges, force):
        if force and keepchanges:
            raise error.Abort(_(b'cannot use both --force and --keep-changes'))

    def new(self, repo, patchfn, *pats, **opts):
        """options:
        msg: a string or a no-argument function returning a string
        """
        opts = pycompat.byteskwargs(opts)
        msg = opts.get(b'msg')
        edit = opts.get(b'edit')
        editform = opts.get(b'editform', b'mq.qnew')
        user = opts.get(b'user')
        date = opts.get(b'date')
        if date:
            date = dateutil.parsedate(date)
        diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True)
        if opts.get(b'checkname', True):
            self.checkpatchname(patchfn)
        inclsubs = checksubstate(repo)
        if inclsubs:
            substatestate = repo.dirstate[b'.hgsubstate']
        if opts.get(b'include') or opts.get(b'exclude') or pats:
            # detect missing files in pats
            def badfn(f, msg):
                if f != b'.hgsubstate':  # .hgsubstate is auto-created
                    raise error.Abort(b'%s: %s' % (f, msg))

            match = scmutil.match(repo[None], pats, opts, badfn=badfn)
            changes = repo.status(match=match)
        else:
            changes = self.checklocalchanges(repo, force=True)
        commitfiles = list(inclsubs)
        for files in changes[:3]:
            commitfiles.extend(files)
        match = scmutil.matchfiles(repo, commitfiles)
        if len(repo[None].parents()) > 1:
            raise error.Abort(_(b'cannot manage merge changesets'))
        self.checktoppatch(repo)
        insert = self.fullseriesend()
        with repo.wlock():
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, b"w")
            except IOError as e:
                raise error.Abort(
                    _(b'cannot write patch "%s": %s')
                    % (patchfn, encoding.strtolocal(e.strerror))
                )
            try:
                defaultmsg = b"[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:

                    def finishdesc(desc):
                        if desc.rstrip():
                            return desc
                        else:
                            return defaultmsg

                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _(b'Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(
                        finishdesc=finishdesc,
                        extramsg=extramsg,
                        editform=editform,
                    )
                    commitmsg = msg
                else:
                    commitmsg = msg or defaultmsg

                n = newcommit(
                    repo,
                    None,
                    commitmsg,
                    user,
                    date,
                    match=match,
                    force=True,
                    editor=editor,
                )
                if n is None:
                    raise error.Abort(_(b"repo commit failed"))
                try:
                    self.fullseries[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parseseries()
                    self.seriesdirty = True
                    self.applieddirty = True
                    nctx = repo[n]
                    ph = patchheader(self.join(patchfn), self.plainmode)
                    if user:
                        ph.setuser(user)
                    if date:
                        ph.setdate(b'%d %d' % date)
                    ph.setparent(hex(nctx.p1().node()))
                    msg = nctx.description().strip()
                    if msg == defaultmsg.strip():
                        msg = b''
                    ph.setmessage(msg)
                    p.write(bytes(ph))
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        if inclsubs:
                            self.putsubstate2changes(substatestate, changes)
                        chunks = patchmod.diff(
                            repo,
                            node1=parent,
                            node2=n,
                            changes=changes,
                            opts=diffopts,
                        )
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except:  # re-raises
                    repo.rollback()
                    raise
            except Exception:
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except OSError:
                    self.ui.warn(_(b'error unlinking %s\n') % patchpath)
                raise
        self.removeundo(repo)

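    # self.applied is an ordered list of statusentry objects, one per
    # applied patch, each pairing a changeset node with the patch name;
    # index 0 is qbase and the last entry is qtip.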
    def isapplied(self, patch):
        """returns (index, rev, patch)"""
        for i, a in enumerate(self.applied):
            if a.name == patch:
                return (i, a.node, a.name)
        return None

    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number (as string) to indicate an offset in the series file
    # 2) a unique substring of the patch name
    # 3) patchname[-+]num to indicate an offset in the series file
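    # Examples: b'2' resolves to the third entry in the series and b'-1'
    # to the last one; b'foo-2' resolves to the patch two positions before
    # b'foo' in the series, and b'foo+' to the one right after it.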
    def lookup(self, patch, strict=False):
        def partialname(s):
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(b'  %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == b'qtip':
                    return self.series[self.seriesend(True) - 1]
                if s == b'qbase':
                    return self.series[0]
            return None

        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

        if not strict:
            res = partialname(patch)
            if res:
                return res
            minus = patch.rfind(b'-')
            if minus >= 0:
                res = partialname(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1 :] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            plus = patch.rfind(b'+')
            if plus >= 0:
                res = partialname(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1 :] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
        raise error.Abort(_(b"patch %s not in series") % patch)

1547 def push(
1550 def push(
1548 self,
1551 self,
1549 repo,
1552 repo,
1550 patch=None,
1553 patch=None,
1551 force=False,
1554 force=False,
1552 list=False,
1555 list=False,
1553 mergeq=None,
1556 mergeq=None,
1554 all=False,
1557 all=False,
1555 move=False,
1558 move=False,
1556 exact=False,
1559 exact=False,
1557 nobackup=False,
1560 nobackup=False,
1558 keepchanges=False,
1561 keepchanges=False,
1559 ):
1562 ):
1560 self.checkkeepchanges(keepchanges, force)
1563 self.checkkeepchanges(keepchanges, force)
1561 diffopts = self.diffopts()
1564 diffopts = self.diffopts()
1562 with repo.wlock():
1565 with repo.wlock():
1563 heads = []
1566 heads = []
1564 for hs in repo.branchmap().iterheads():
1567 for hs in repo.branchmap().iterheads():
1565 heads.extend(hs)
1568 heads.extend(hs)
1566 if not heads:
1569 if not heads:
1567 heads = [nullid]
1570 heads = [nullid]
1568 if repo.dirstate.p1() not in heads and not exact:
1571 if repo.dirstate.p1() not in heads and not exact:
1569 self.ui.status(_(b"(working directory not at a head)\n"))
1572 self.ui.status(_(b"(working directory not at a head)\n"))
1570
1573
1571 if not self.series:
1574 if not self.series:
1572 self.ui.warn(_(b'no patches in series\n'))
1575 self.ui.warn(_(b'no patches in series\n'))
1573 return 0
1576 return 0
1574
1577
1575 # Suppose our series file is: A B C and the current 'top'
1578 # Suppose our series file is: A B C and the current 'top'
1576 # patch is B. qpush C should be performed (moving forward)
1579 # patch is B. qpush C should be performed (moving forward)
1577 # qpush B is a NOP (no change) qpush A is an error (can't
1580 # qpush B is a NOP (no change) qpush A is an error (can't
1578 # go backwards with qpush)
1581 # go backwards with qpush)
1579 if patch:
1582 if patch:
1580 patch = self.lookup(patch)
1583 patch = self.lookup(patch)
1581 info = self.isapplied(patch)
1584 info = self.isapplied(patch)
1582 if info and info[0] >= len(self.applied) - 1:
1585 if info and info[0] >= len(self.applied) - 1:
1583 self.ui.warn(
1586 self.ui.warn(
1584 _(b'qpush: %s is already at the top\n') % patch
1587 _(b'qpush: %s is already at the top\n') % patch
1585 )
1588 )
1586 return 0
1589 return 0
1587
1590
1588 pushable, reason = self.pushable(patch)
1591 pushable, reason = self.pushable(patch)
1589 if pushable:
1592 if pushable:
1590 if self.series.index(patch) < self.seriesend():
1593 if self.series.index(patch) < self.seriesend():
1591 raise error.Abort(
1594 raise error.Abort(
1592 _(b"cannot push to a previous patch: %s") % patch
1595 _(b"cannot push to a previous patch: %s") % patch
1593 )
1596 )
1594 else:
1597 else:
1595 if reason:
1598 if reason:
1596 reason = _(b'guarded by %s') % reason
1599 reason = _(b'guarded by %s') % reason
1597 else:
1600 else:
1598 reason = _(b'no matching guards')
1601 reason = _(b'no matching guards')
1599 self.ui.warn(
1602 self.ui.warn(
1600 _(b"cannot push '%s' - %s\n") % (patch, reason)
1603 _(b"cannot push '%s' - %s\n") % (patch, reason)
1601 )
1604 )
1602 return 1
1605 return 1
1603 elif all:
1606 elif all:
1604 patch = self.series[-1]
1607 patch = self.series[-1]
1605 if self.isapplied(patch):
1608 if self.isapplied(patch):
1606 self.ui.warn(_(b'all patches are currently applied\n'))
1609 self.ui.warn(_(b'all patches are currently applied\n'))
1607 return 0
1610 return 0
1608
1611
1609 # Following the above example, starting at 'top' of B:
1612 # Following the above example, starting at 'top' of B:
1610 # qpush should be performed (pushes C), but a subsequent
1613 # qpush should be performed (pushes C), but a subsequent
1611 # qpush without an argument is an error (nothing to
1614 # qpush without an argument is an error (nothing to
1612 # apply). This allows a loop of "...while hg qpush..." to
1615 # apply). This allows a loop of "...while hg qpush..." to
1613 # work as it detects an error when done
1616 # work as it detects an error when done
1614 start = self.seriesend()
1617 start = self.seriesend()
1615 if start == len(self.series):
1618 if start == len(self.series):
1616 self.ui.warn(_(b'patch series already fully applied\n'))
1619 self.ui.warn(_(b'patch series already fully applied\n'))
1617 return 1
1620 return 1
1618 if not force and not keepchanges:
1621 if not force and not keepchanges:
1619 self.checklocalchanges(repo, refresh=self.applied)
1622 self.checklocalchanges(repo, refresh=self.applied)
1620
1623
1621 if exact:
1624 if exact:
1622 if keepchanges:
1625 if keepchanges:
1623 raise error.Abort(
1626 raise error.Abort(
1624 _(b"cannot use --exact and --keep-changes together")
1627 _(b"cannot use --exact and --keep-changes together")
1625 )
1628 )
1626 if move:
1629 if move:
1627 raise error.Abort(
1630 raise error.Abort(
1628 _(b'cannot use --exact and --move ' b'together')
1631 _(b'cannot use --exact and --move ' b'together')
1629 )
1632 )
1630 if self.applied:
1633 if self.applied:
1631 raise error.Abort(
1634 raise error.Abort(
1632 _(b'cannot push --exact with applied ' b'patches')
1635 _(b'cannot push --exact with applied ' b'patches')
1633 )
1636 )
1634 root = self.series[start]
1637 root = self.series[start]
1635 target = patchheader(self.join(root), self.plainmode).parent
1638 target = patchheader(self.join(root), self.plainmode).parent
1636 if not target:
1639 if not target:
1637 raise error.Abort(
1640 raise error.Abort(
1638 _(b"%s does not have a parent recorded") % root
1641 _(b"%s does not have a parent recorded") % root
1639 )
1642 )
1640 if not repo[target] == repo[b'.']:
                if not repo[target] == repo[b'.']:
                    hg.update(repo, target)

            if move:
                if not patch:
                    raise error.Abort(_(b"please specify the patch to move"))
                for fullstart, rpn in enumerate(self.fullseries):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                        break
                for i, rpn in enumerate(self.fullseries[fullstart:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = fullstart + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(fullstart, fullpatch)
                self.parseseries()
                self.seriesdirty = True

            self.applieddirty = True
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            tobackup = set()
            if (not nobackup and force) or keepchanges:
                status = self.checklocalchanges(repo, force=True)
                if keepchanges:
                    tobackup.update(
                        status.modified
                        + status.added
                        + status.removed
                        + status.deleted
                    )
                else:
                    tobackup.update(status.modified + status.added)

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(
                        repo,
                        s,
                        list,
                        all_files=all_files,
                        tobackup=tobackup,
                        keepchanges=keepchanges,
                    )
            except AbortNoCleanup:
                raise
            except:  # re-raises
                self.ui.warn(_(b'cleaning up working directory...\n'))
                cmdutil.revert(
                    self.ui,
                    repo,
                    repo[b'.'],
                    repo.dirstate.parents(),
                    no_backup=True,
                )
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                self.ui.warn(_(b'done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _(b"errors during apply, please fix and qrefresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_(b"now at: %s\n") % top)
            return ret[0]

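    # qpop flow, roughly: resolve the patch argument, pick the range of
    # applied patches to unwind, back up (or refuse on) local changes,
    # revert the working copy to the patch queue parent, then strip the
    # popped changesets.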
    def pop(
        self,
        repo,
        patch=None,
        force=False,
        update=True,
        all=False,
        nobackup=False,
        keepchanges=False,
    ):
        self.checkkeepchanges(keepchanges, force)
        with repo.wlock():
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise error.Abort(_(b"patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_(b"no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
                return

            if not update:
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_(b"qpop: forcing dirstate update\n"))
                        update = True
            else:
                parents = [p.node() for p in repo[None].parents()]
                update = any(
                    entry.node in parents for entry in self.applied[start:]
                )

            tobackup = set()
            if update:
                s = self.checklocalchanges(repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(s.modified + s.added)
                elif keepchanges:
                    tobackup.update(
                        s.modified + s.added + s.removed + s.deleted
                    )

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise error.Abort(_(b'trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise error.Abort(
                    _(
                        b"popping would remove a revision not "
                        b"managed by this patch queue"
                    )
                )
            if not repo[self.applied[-1].node].mutable():
                raise error.Abort(
                    _(b"popping would remove a public revision"),
                    hint=_(b"see 'hg help phases' for details"),
                )

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, b'.')[:4]
                if d:
                    raise error.Abort(_(b"deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    raise error.Abort(_(b"local changes found, qrefresh first"))
                self.backup(repo, tobackup)
                with repo.dirstate.parentchange():
                    for f in a:
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                        repo.dirstate.drop(f)
                    for f in m + r:
                        fctx = ctx[f]
                        repo.wwrite(f, fctx.data(), fctx.flags())
                        repo.dirstate.normal(f)
                    repo.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_(b"popping %s\n") % patch.name)
            del self.applied[start:end]
            strip(self.ui, repo, [rev], update=False, backup=False)
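            # update each subrepo to the state recorded in the new parent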
            for s, state in repo[b'.'].substate.items():
                repo[b'.'].sub(s).get(state)
            if self.applied:
                self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_(b"patch queue now empty\n"))

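    # diff of the top patch: working directory against the patch queue
    # parent (or the reverse with --reverse).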
    def diff(self, repo, pats, opts):
        top, patch = self.checktoppatch(repo)
        if not top:
            self.ui.write(_(b"no patches applied\n"))
            return
        qp = self.qparents(repo, top)
        if opts.get(b'reverse'):
            node1, node2 = None, qp
        else:
            node1, node2 = qp, None
        diffopts = self.diffopts(opts, patch)
        self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)

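    # qrefresh, in outline: recompute which files belong in the top
    # patch, rewrite the patch file, then strip the old qtip changeset
    # and commit a replacement in the same phase.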
    def refresh(self, repo, pats=None, **opts):
        opts = pycompat.byteskwargs(opts)
        if not self.applied:
            self.ui.write(_(b"no patches applied\n"))
            return 1
        msg = opts.get(b'msg', b'').rstrip()
        edit = opts.get(b'edit')
        editform = opts.get(b'editform', b'mq.qrefresh')
        newuser = opts.get(b'user')
        newdate = opts.get(b'date')
        if newdate:
            newdate = b'%d %d' % dateutil.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise error.Abort(
                    _(b"cannot qrefresh a revision with children")
                )
            if not repo[top].mutable():
                raise error.Abort(
                    _(b"cannot qrefresh public revision"),
                    hint=_(b"see 'hg help phases' for details"),
                )

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)

            inclsubs = checksubstate(repo, patchparent)
            if inclsubs:
                substatestate = repo.dirstate[b'.hgsubstate']

            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts(
                {b'git': opts.get(b'git')}, patchfn, plain=True
            )
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, b'w', atomictemp=True)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            # mm, dd, aa = repo.status(top, patchparent)[:3]
            # but we do it backwards to take advantage of manifest/changelog
            # caching against the next repo.status call
            mm, aa, dd = repo.status(patchparent, top)[:3]
            ctx = repo[top]
            aaa = aa[:]
            match1 = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get(b'short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
                # filter with include/exclude options
                match1 = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)

            # create 'match' that includes the files to be recommitted.
            # apply match1 via repo.status to ensure correct case handling.
            cm, ca, cr, cd = repo.status(patchparent, match=match1)[:4]
            allmatches = set(cm + ca + cr + cd)
            refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]

            files = set(inclsubs)
            for x in refreshchanges:
                files.update(x)
            match = scmutil.matchfiles(repo, files)

            bmlist = repo[top].bookmarks()

            dsguard = None
            try:
                dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        src = ctx[dst].copysource()
                        if src:
                            copies.setdefault(src, []).extend(
                                copies.get(dst, [])
                            )
                            if dst in a:
                                copies[src].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    # dict.iteritems() is gone on Python 3; items() behaves
                    # the same for this use on both supported versions
                    for src, dsts in copies.items():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in pycompat.xrange(len(m) - 1, -1, -1):
                    if not match1(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.drop(f)

                user = ph.user or ctx.user()

                oldphase = repo[top].phase()

                # assumes strip can roll itself back if interrupted
                repo.setparents(*cparents)
                self.applied.pop()
                self.applieddirty = True
                strip(self.ui, repo, [top], update=False, backup=False)
                dsguard.close()
            finally:
                release(dsguard)

            try:
                # might be nice to attempt to roll back strip after this

                defaultmsg = b"[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:

                    def finishdesc(desc):
                        if desc.rstrip():
                            ph.setmessage(desc)
                            return desc
                        return defaultmsg

                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _(b'Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(
                        finishdesc=finishdesc,
                        extramsg=extramsg,
                        editform=editform,
                    )
                    message = msg or b"\n".join(ph.message)
                elif not msg:
                    if not ph.message:
                        message = defaultmsg
                    else:
                        message = b"\n".join(ph.message)
                else:
                    message = msg
                    ph.setmessage(msg)
                # Ensure we create a new changeset in the same phase as
                # the old one.
                lock = tr = None
                try:
                    lock = repo.lock()
                    tr = repo.transaction(b'mq')
                    n = newcommit(
                        repo,
                        oldphase,
                        message,
                        user,
                        ph.date,
                        match=match,
                        force=True,
                        editor=editor,
                    )
                    # only write patch after a successful commit
                    c = [list(x) for x in refreshchanges]
                    if inclsubs:
                        self.putsubstate2changes(substatestate, c)
                    chunks = patchmod.diff(
                        repo, patchparent, changes=c, opts=diffopts
                    )
                    comments = bytes(ph)
                    if comments:
                        patchf.write(comments)
                    for chunk in chunks:
                        patchf.write(chunk)
                    patchf.close()

                    marks = repo._bookmarks
                    marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
                    tr.close()

                    self.applied.append(statusentry(n, patchfn))
                finally:
                    lockmod.release(tr, lock)
            except:  # re-raises
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(
                    _(
                        b'qrefresh interrupted while patch was popped! '
                        b'(revert --all, qpush to recover)\n'
                    )
                )
                raise
        finally:
            wlock.release()
            self.removeundo(repo)

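    # init: create the patch queue directory (self.path) and, when
    # requested, a versioned queue repository on top of it.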
    def init(self, repo, create=False):
        if not create and os.path.isdir(self.path):
            raise error.Abort(_(b"patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except OSError as inst:
            if inst.errno != errno.EEXIST or not create:
                raise
        if create:
            return self.qrepo(create=True)

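    # unapplied: return (series index, patch name) pairs for pushable
    # patches that have not been applied yet.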
    def unapplied(self, repo, patch=None):
        if patch and patch not in self.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        if not patch:
            start = self.seriesend()
        else:
            start = self.series.index(patch) + 1
        unapplied = []
        for i in pycompat.xrange(start, len(self.series)):
            pushable, reason = self.pushable(i)
            if pushable:
                unapplied.append((i, self.series[i]))
            self.explainpushable(i)
        return unapplied

    def qseries(
        self,
        repo,
        missing=None,
        start=0,
        length=None,
        status=None,
        summary=False,
    ):
        def displayname(pfx, patchname, state):
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                if ph.message:
                    msg = ph.message[0]
                else:
                    msg = b''

                if self.ui.formatted():
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = stringutil.ellipsis(msg, width)
                    else:
                        msg = b''
                self.ui.write(patchname, label=b'qseries.' + state)
                self.ui.write(b': ')
                self.ui.write(msg, label=b'qseries.message.' + state)
            else:
                self.ui.write(patchname, label=b'qseries.' + state)
            self.ui.write(b'\n')

        applied = {p.name for p in self.applied}
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(b"%d" % (start + length - 1))
            for i in pycompat.xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = b'A', b'applied'
                elif self.pushable(i)[0]:
                    char, state = b'U', b'unapplied'
                else:
                    char, state = b'G', b'guarded'
                pfx = b''
                if self.ui.verbose:
                    pfx = b'%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    continue
                displayname(pfx, patch, state)
        else:
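            # patch files that exist in the queue directory but are
            # missing from the series file (ignoring mq bookkeeping files)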
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1 :]
                for f in files:
                    fl = os.path.join(d, f)
                    if (
                        fl not in self.series
                        and fl
                        not in (
                            self.statuspath,
                            self.seriespath,
                            self.guardspath,
                        )
                        and not fl.startswith(b'.')
                    ):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = b'D ' if self.ui.verbose else b''
                displayname(pfx, x, b'missing')

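    # qsave records queue state in a sentinel changeset whose status
    # entry is named '.hg.patches.save.line'; issaveline detects it.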
    def issaveline(self, l):
        if l.name == b'.hg.patches.save.line':
            return True

    def qrepo(self, create=False):
        ui = self.baseui.copy()
        # copy back attributes set by ui.pager()
        if self.ui.pageractive and not ui.pageractive:
            ui.pageractive = self.ui.pageractive
            # internal config: ui.formatted
            ui.setconfig(
                b'ui',
                b'formatted',
                self.ui.config(b'ui', b'formatted'),
                b'mqpager',
            )
            ui.setconfig(
                b'ui',
                b'interactive',
                self.ui.config(b'ui', b'interactive'),
                b'mqpager',
            )
        if create or os.path.isdir(self.join(b".hg")):
            return hg.repository(ui, path=self.path, create=create)

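    # restore: rebuild queue state from a changeset created by save()
    # below. The description being parsed looks roughly like:
    #
    #   hg patches saved state
    #   Dirstate: <parent1 hex> <parent2 hex>
    #
    #   Patch Data:
    #   <node hex>:<name>   (one line per applied patch)
    #   :<series line>      (one line per full series entry)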
    def restore(self, repo, rev, delete=None, qupdate=None):
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == b'Patch Data:':
                datastart = i + 1
            elif line.startswith(b'Dirstate:'):
                l = line.rstrip()
                l = l[10:].split(b' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                l = line.rstrip()
                n, name = l.split(b':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_(b"no saved patch data found\n"))
            return 1
        self.ui.warn(_(b"restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn(_(b"save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_(b"removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                strip(self.ui, repo, [rev], update=update, backup=False)
        if qpp:
            self.ui.warn(
                _(b"saved queue repository parents: %s %s\n")
                % (short(qpp[0]), short(qpp[1]))
            )
            if qupdate:
                self.ui.status(_(b"updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_(b"unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])

    def save(self, repo, msg=None):
        if not self.applied:
            self.ui.warn(_(b"save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_(b"status is already saved\n"))
            return 1

        if not msg:
            msg = _(b"hg patches saved state")
        else:
            msg = b"hg patches: " + msg.rstrip(b'\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += b"\n\nPatch Data:\n"
        msg += b''.join(b'%s\n' % x for x in self.applied)
        msg += b''.join(b':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_(b"repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, b'.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)

    def fullseriesend(self):
        if self.applied:
            p = self.applied[-1].name
            end = self.findseries(p)
            if end is None:
                return len(self.fullseries)
            return end + 1
        return 0

    def seriesend(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
        end = 0

        def nextpatch(start):
            if all_patches or start >= len(self.series):
                return start
            for i in pycompat.xrange(start, len(self.series)):
                p, reason = self.pushable(i)
                if p:
                    return i
                self.explainpushable(i)
            return len(self.series)

        if self.applied:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                return 0
            return nextpatch(end + 1)
        return nextpatch(end)

    def appliedname(self, index):
        pname = self.applied[index].name
        if not self.ui.verbose:
            p = pname
        else:
            p = (b"%d" % self.series.index(pname)) + b" " + pname
        return p

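    # qimport, two modes: with rev, export existing changesets to patch
    # files and put them under mq control; with files, copy patch files
    # (or stdin as '-') into the queue and add them to the series.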
    def qimport(
        self,
        repo,
        files,
        patchname=None,
        rev=None,
        existing=None,
        force=None,
        git=False,
    ):
        def checkseries(patchname):
            if patchname in self.series:
                raise error.Abort(
                    _(b'patch %s is already in the series file') % patchname
                )

        if rev:
            if files:
                raise error.Abort(
                    _(b'option "-r" not valid when importing files')
                )
            rev = scmutil.revrange(repo, rev)
            rev.sort(reverse=True)
        elif not files:
            raise error.Abort(_(b'no files or revisions specified'))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise error.Abort(
                _(b'option "-n" not valid when importing multiple patches')
            )
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev.first()))
            if len(heads) > 1:
                raise error.Abort(
                    _(b'revision %d is the root of more than one branch')
                    % rev.last()
                )
            if self.applied:
                base = repo.changelog.node(rev.first())
                if base in [n.node for n in self.applied]:
                    raise error.Abort(
                        _(b'revision %d is already managed') % rev.first()
                    )
                if heads != [self.applied[-1].node]:
                    raise error.Abort(
                        _(b'revision %d is not the parent of the queue')
                        % rev.first()
                    )
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev.first())]:
                    raise error.Abort(
                        _(b'revision %d has unmanaged children') % rev.first()
                    )
                lastparent = None

            diffopts = self.diffopts({b'git': git})
            with repo.transaction(b'qimport') as tr:
                for r in rev:
                    if not repo[r].mutable():
                        raise error.Abort(
                            _(b'revision %d is not mutable') % r,
                            hint=_(b"see 'hg help phases' for details"),
                        )
                    p1, p2 = repo.changelog.parentrevs(r)
                    n = repo.changelog.node(r)
                    if p2 != nullrev:
                        raise error.Abort(
                            _(b'cannot import merge revision %d') % r
                        )
                    if lastparent and lastparent != r:
                        raise error.Abort(
                            _(b'revision %d is not the parent of %d')
                            % (r, lastparent)
                        )
                    lastparent = p1

                    if not patchname:
                        patchname = self.makepatchname(
                            repo[r].description().split(b'\n', 1)[0],
                            b'%d.diff' % r,
                        )
                    checkseries(patchname)
                    self.checkpatchname(patchname, force)
                    self.fullseries.insert(0, patchname)

                    with self.opener(patchname, b"w") as fp:
                        cmdutil.exportfile(repo, [n], fp, opts=diffopts)

                    se = statusentry(n, patchname)
                    self.applied.insert(0, se)

                    self.added.append(patchname)
                    imported.append(patchname)
                    patchname = None
                    if rev and repo.ui.configbool(b'mq', b'secret'):
                        # if we added anything with --rev, move the secret root
                        phases.retractboundary(repo, tr, phases.secret, [n])
                self.parseseries()
                self.applieddirty = True
                self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                if filename == b'-':
                    raise error.Abort(
                        _(b'-e is incompatible with import from -')
                    )
                filename = normname(filename)
                self.checkreservedname(filename)
                if util.url(filename).islocal():
                    originpath = self.join(filename)
                    if not os.path.isfile(originpath):
                        raise error.Abort(
                            _(b"patch %s does not exist") % filename
                        )

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(
                        _(b'renaming %s to %s\n') % (filename, patchname)
                    )
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                if filename == b'-' and not patchname:
                    raise error.Abort(
                        _(b'need --name to import a patch from -')
                    )
                elif not patchname:
                    patchname = normname(
                        os.path.basename(filename.rstrip(b'/'))
                    )
                self.checkpatchname(patchname, force)
                try:
                    if filename == b'-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise error.Abort(_(b"unable to read file %s") % filename)
                patchf = self.opener(patchname, b"w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
            self.parseseries()
            self.seriesdirty = True
            self.ui.warn(_(b"adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported


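# With the mq.keepchanges config option set, commands behave as if
# --keep-changes were passed, unless --force or --exact overrides it.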
def fixkeepchangesopts(ui, opts):
    if (
        not ui.configbool(b'mq', b'keepchanges')
        or opts.get(b'force')
        or opts.get(b'exact')
    ):
        return opts
    opts = dict(opts)
    opts[b'keep_changes'] = True
    return opts


@command(
    b"qdelete|qremove|qrm",
    [
        (b'k', b'keep', None, _(b'keep patch file')),
        (
            b'r',
            b'rev',
            [],
            _(b'stop managing a revision (DEPRECATED)'),
            _(b'REV'),
        ),
    ],
    _(b'hg qdelete [-k] [PATCH]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    q = repo.mq
    q.delete(repo, patches, pycompat.byteskwargs(opts))
    q.savedirty()
    return 0


@command(
    b"qapplied",
    [(b'1', b'last', None, _(b'show only the preceding applied patch'))]
    + seriesopts,
    _(b'hg qapplied [-1] [-s] [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)

    if patch:
        if patch not in q.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    if opts.get(b'last') and not end:
        ui.write(_(b"no patches applied\n"))
        return 1
    elif opts.get(b'last') and end == 1:
        ui.write(_(b"only one patch applied\n"))
        return 1
    elif opts.get(b'last'):
        start = end - 2
        end = 1
    else:
        start = 0

    q.qseries(
        repo, length=end, start=start, status=b'A', summary=opts.get(b'summary')
    )


@command(
    b"qunapplied",
    [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts,
    _(b'hg qunapplied [-1] [-s] [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    if patch:
        if patch not in q.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if start == len(q.series) and opts.get(b'first'):
        ui.write(_(b"all patches applied\n"))
        return 1

    if opts.get(b'first'):
        length = 1
    else:
        length = None
    q.qseries(
        repo,
        start=start,
        length=length,
        status=b'U',
        summary=opts.get(b'summary'),
    )


@command(
    b"qimport",
    [
        (b'e', b'existing', None, _(b'import file in patch directory')),
        (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')),
        (b'f', b'force', None, _(b'overwrite existing files')),
        (
            b'r',
            b'rev',
            [],
            _(b'place existing revisions under mq control'),
            _(b'REV'),
        ),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (b'P', b'push', None, _(b'qpush after importing')),
    ],
    _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev . -n patch will place the current revision
    under mq control). With -g/--git, patches imported with --rev will
    use the git diff format. See the diffs help topic for information
    on why this is important for preserving rename/copy information
    and permission changes. Use :hg:`qfinish` to remove changesets
    from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    opts = pycompat.byteskwargs(opts)
2711 with repo.lock(): # cause this may move phase
2714 with repo.lock(): # cause this may move phase
2712 q = repo.mq
2715 q = repo.mq
2713 try:
2716 try:
2714 imported = q.qimport(
2717 imported = q.qimport(
2715 repo,
2718 repo,
2716 filename,
2719 filename,
2717 patchname=opts.get(b'name'),
2720 patchname=opts.get(b'name'),
2718 existing=opts.get(b'existing'),
2721 existing=opts.get(b'existing'),
2719 force=opts.get(b'force'),
2722 force=opts.get(b'force'),
2720 rev=opts.get(b'rev'),
2723 rev=opts.get(b'rev'),
2721 git=opts.get(b'git'),
2724 git=opts.get(b'git'),
2722 )
2725 )
2723 finally:
2726 finally:
2724 q.savedirty()
2727 q.savedirty()
2725
2728
2726 if imported and opts.get(b'push') and not opts.get(b'rev'):
2729 if imported and opts.get(b'push') and not opts.get(b'rev'):
2727 return q.push(repo, imported[-1])
2730 return q.push(repo, imported[-1])
2728 return 0
2731 return 0
2729
2732
2730
2733
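# For reference, the .hgignore that qinit writes below ends up containing
# (reconstructed from the fp.write() calls; shown here only as a comment):
#
#   ^\.hg
#   ^\.mq
#   syntax: glob
#   status
#   guards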
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if r:
        if not os.path.exists(r.wjoin(b'.hgignore')):
            fp = r.wvfs(b'.hgignore', b'w')
            fp.write(b'^\\.hg\n')
            fp.write(b'^\\.mq\n')
            fp.write(b'syntax: glob\n')
            fp.write(b'status\n')
            fp.write(b'guards\n')
            fp.close()
        if not os.path.exists(r.wjoin(b'series')):
            r.wvfs(b'series', b'w').close()
        r[None].add([b'.hgignore', b'series'])
        commands.add(ui, r)
    return 0


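# Deprecated wrapper: per the docstring below, `hg qinit -c` is equivalent
# to the preferred `hg init --mq`.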
@command(
    b"qinit",
    [(b'c', b'create-repo', None, _(b'create queue repository'))],
    _(b'hg qinit [-c]'),
    helpcategory=command.CATEGORY_REPO_CREATION,
    helpbasic=True,
)
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    return qinit(ui, repo, create=opts.get(r'create_repo'))


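# Illustrative qclone usage (URLs and paths are hypothetical):
#
#   hg qclone http://example.com/repo local-copy
#   hg qclone -p ../patches-repo src dst   # explicit patch repository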
@command(
    b"qclone",
    [
        (b'', b'pull', None, _(b'use pull protocol to copy metadata')),
        (
            b'U',
            b'noupdate',
            None,
            _(b'do not update the new working directories'),
        ),
        (
            b'',
            b'uncompressed',
            None,
            _(b'use uncompressed transfer (fast over LAN)'),
        ),
        (
            b'p',
            b'patches',
            b'',
            _(b'location of source patch repository'),
            _(b'REPO'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'hg qclone [OPTION]... SOURCE [DEST]'),
    helpcategory=command.CATEGORY_REPO_CREATION,
    norepo=True,
)
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at the same time

    If source is local, destination will have no patches applied. If
    source is remote, this command cannot check whether patches are
    applied in the source, so it cannot guarantee that patches are not
    applied in the destination. If you clone a remote repository, make
    sure beforehand that it has no patches applied.

    The source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith(b'/'):
            url = url[:-1]
        return url + b'/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.peer(ui, opts, ui.expandpath(source))

    # patches repo (source only)
    if opts.get(b'patches'):
        patchespath = ui.expandpath(opts.get(b'patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.peer(ui, opts, patchespath)
    except error.RepoError:
        raise error.Abort(
            _(b'versioned patch repository not found' b' (see init --mq)')
        )
    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable(b'lookup'):
        try:
            qbase = sr.lookup(b'qbase')
        except error.RepoError:
            pass

    ui.note(_(b'cloning main repository\n'))
    sr, dr = hg.clone(
        ui,
        opts,
        sr.url(),
        dest,
        pull=opts.get(b'pull'),
        revs=destrev,
        update=False,
        stream=opts.get(b'uncompressed'),
    )

    ui.note(_(b'cloning patch repository\n'))
    hg.clone(
        ui,
        opts,
        opts.get(b'patches') or patchdir(sr),
        patchdir(dr),
        pull=opts.get(b'pull'),
        update=not opts.get(b'noupdate'),
        stream=opts.get(b'uncompressed'),
    )

    if dr.local():
        repo = dr.local()
        if qbase:
            ui.note(
                _(
                    b'stripping applied patches from destination '
                    b'repository\n'
                )
            )
            strip(ui, repo, [qbase], update=False, backup=None)
        if not opts.get(b'noupdate'):
            ui.note(_(b'updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())


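# Deprecated alias: `hg qcommit -m msg` behaves like `hg commit --mq -m msg`,
# committing inside the versioned patch repository.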
@command(
    b"qcommit|qci",
    commands.table[b"commit|ci"][1],
    _(b'hg qcommit [OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    inferrepo=True,
)
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        raise error.Abort(b'no queue repository')
    commands.commit(r.ui, r, *pats, **opts)


@command(
    b"qseries",
    [(b'm', b'missing', None, _(b'print patches not in series')),] + seriesopts,
    _(b'hg qseries [-ms]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    repo.mq.qseries(
        repo, missing=opts.get(r'missing'), summary=opts.get(r'summary')
    )
    return 0


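# qtop, qnext and qprev (defined below) navigate the applied stack. With
# a.patch and b.patch applied and c.patch still unapplied (hypothetical
# names), one would see roughly:
#
#   $ hg qtop    -> b.patch
#   $ hg qnext   -> c.patch
#   $ hg qprev   -> a.patch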
@command(
    b"qtop",
    seriesopts,
    _(b'hg qtop [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    if q.applied:
        t = q.seriesend(True)
    else:
        t = 0

    if t:
        q.qseries(
            repo,
            start=t - 1,
            length=1,
            status=b'A',
            summary=opts.get(r'summary'),
        )
    else:
        ui.write(_(b"no patches applied\n"))
        return 1


@command(
    b"qnext",
    seriesopts,
    _(b'hg qnext [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    end = q.seriesend()
    if end == len(q.series):
        ui.write(_(b"all patches applied\n"))
        return 1
    q.qseries(repo, start=end, length=1, summary=opts.get(r'summary'))


@command(
    b"qprev",
    seriesopts,
    _(b'hg qprev [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    l = len(q.applied)
    if l == 1:
        ui.write(_(b"only one patch applied\n"))
        return 1
    if not l:
        ui.write(_(b"no patches applied\n"))
        return 1
    idx = q.series.index(q.applied[-2].name)
    q.qseries(
        repo, start=idx, length=1, status=b'A', summary=opts.get(r'summary')
    )


def setupheaderopts(ui, opts):
    if not opts.get(b'user') and opts.get(b'currentuser'):
        opts[b'user'] = ui.username()
    if not opts.get(b'date') and opts.get(b'currentdate'):
        opts[b'date'] = b"%d %d" % dateutil.makedate()


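# Illustrative qnew usage (patch names and patterns are examples):
#
#   hg qnew -U -m "fix parser" fix-parser.patch
#   hg qnew -g -I 'src/**.py' py-only.patch   # include only matching files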
@command(
    b"qnew",
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')),
        (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')),
        (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')),
        (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')),
    ]
    + cmdutil.walkopts
    + cmdutil.commitopts,
    _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
    inferrepo=True,
)
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    opts = pycompat.byteskwargs(opts)
    msg = cmdutil.logmessage(ui, opts)
    q = repo.mq
    opts[b'msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **pycompat.strkwargs(opts))
    q.savedirty()
    return 0


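# A common mq edit loop (illustrative; the patch name is an example):
# create a patch, keep hacking, then fold the new working-directory
# changes back into it with qrefresh:
#
#   hg qnew feature.patch
#   ... edit files ...
#   hg qrefresh -m "feature: first cut"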
@command(
    b"qrefresh",
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (
            b's',
            b'short',
            None,
            _(b'refresh only files already in the patch and specified files'),
        ),
        (
            b'U',
            b'currentuser',
            None,
            _(b'add/update author field in patch with current user'),
        ),
        (
            b'u',
            b'user',
            b'',
            _(b'add/update author field in patch with given user'),
            _(b'USER'),
        ),
        (
            b'D',
            b'currentdate',
            None,
            _(b'add/update date field in patch with current date'),
        ),
        (
            b'd',
            b'date',
            b'',
            _(b'add/update date field in patch with given date'),
            _(b'DATE'),
        ),
    ]
    + cmdutil.walkopts
    + cmdutil.commitopts,
    _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
    inferrepo=True,
)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    with repo.wlock():
        ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
        q.savedirty()
        return ret


@command(
    b"qdiff",
    cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
    _(b'hg qdiff [OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_FILE_CONTENTS,
    helpbasic=True,
    inferrepo=True,
)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    ui.pager(b'qdiff')
    repo.mq.diff(repo, pats, pycompat.byteskwargs(opts))
    return 0


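# Illustrative qfold usage: with a.patch applied and b.patch, c.patch still
# unapplied (hypothetical names), this merges both into a.patch and deletes
# their files unless -k/--keep is given:
#
#   hg qfold b.patch c.patch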
@command(
    b'qfold',
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'k', b'keep', None, _(b'keep folded patch files')),
    ]
    + cmdutil.commitopts,
    _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    if not files:
        raise error.Abort(_(b'qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_(b'no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup(b'qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_(b'skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise error.Abort(
                _(b'qfold cannot fold already applied patch %s') % p
            )
        patches.append(p)

    for p in patches:
        if not message:
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_(b'error folding patch %s') % p)

    if not message:
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append(b'* * *')
                message.extend(msg)
        message = b'\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    with repo.wlock():
        q.refresh(
            repo,
            msg=message,
            git=diffopts.git,
            edit=opts.get(b'edit'),
            editform=b'mq.qfold',
        )
        q.delete(repo, patches, opts)
        q.savedirty()


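# qgoto pushes or pops as needed; e.g. `hg qgoto c.patch` (a hypothetical
# patch name) pops back or pushes forward until c.patch is the topmost
# applied patch.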
@command(
    b"qgoto",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'overwrite any local changes')),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qgoto [OPTION]... PATCH'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get(b'no_backup')
    keepchanges = opts.get(b'keep_changes')
    if q.isapplied(patch):
        ret = q.pop(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    else:
        ret = q.push(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    q.savedirty()
    return ret


@command(
    b"qguard",
    [
        (b'l', b'list', None, _(b'list all patches and guards')),
        (b'n', b'none', None, _(b'drop all guards')),
    ],
    _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''

    def status(idx):
        guards = q.seriesguards[idx] or [b'unguarded']
        if q.series[idx] in applied:
            state = b'applied'
        elif q.pushable(idx)[0]:
            state = b'unapplied'
        else:
            state = b'guarded'
        label = b'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write(b'%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith(b'+'):
                ui.write(guard, label=b'qguard.positive')
            elif guard.startswith(b'-'):
                ui.write(guard, label=b'qguard.negative')
            else:
                ui.write(guard, label=b'qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(b' ')
        ui.write(b'\n')

    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get(r'list'):
        if args or opts.get(r'none'):
            raise error.Abort(
                _(b'cannot mix -l/--list with options or ' b'arguments')
            )
        for i in pycompat.xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in b'-+':
        if not q.applied:
            raise error.Abort(_(b'no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in b'-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_(b'no patch to work with'))
    if args or opts.get(r'none'):
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_(b'no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        status(q.series.index(q.lookup(patch)))


@command(
    b"qheader",
    [],
    _(b'hg qheader [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_(b'no patches applied\n'))
            return 1
        patch = q.lookup(b'qtip')
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write(b'\n'.join(ph.message) + b'\n')


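# lastsavename()/savename() implement the numbering used by qsave and
# `qpush --merge`: saved queues live next to the patch directory as
# <path>.1, <path>.2, ... For example (hypothetical layout), with
# patches.1 and patches.2 present, lastsavename(b'.hg/patches') returns
# (b'.hg/patches.2', 2) and savename(b'.hg/patches') returns
# b'.hg/patches.3'.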
def lastsavename(path):
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    namere = re.compile(b"%s.([0-9]+)" % base)
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)


def savename(path):
    (last, index) = lastsavename(path)
    if last is None:
        index = 0
    newpath = path + b".%d" % (index + 1)
    return newpath


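# Illustrative qpush/qpop usage (the patch name is an example):
#
#   hg qpush                     # apply the next patch in the series
#   hg qpush --move fix.patch    # reorder the series, apply only fix.patch
#   hg qpop -a                   # pop every applied patch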
@command(
    b"qpush",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'apply on top of local changes')),
        (
            b'e',
            b'exact',
            None,
            _(b'apply the target patch to its recorded parent'),
        ),
        (b'l', b'list', None, _(b'list patch name in commit text')),
        (b'a', b'all', None, _(b'apply all patches')),
        (b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')),
        (b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')),
        (
            b'',
            b'move',
            None,
            _(b'reorder patch series and apply only the patch'),
        ),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True,
)
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    if opts.get(b'merge'):
        if opts.get(b'name'):
            newpath = repo.vfs.join(opts.get(b'name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_(b"no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_(b"merging with queue at: %s\n") % mergeq.path)
    ret = q.push(
        repo,
        patch,
        force=opts.get(b'force'),
        list=opts.get(b'list'),
        mergeq=mergeq,
        all=opts.get(b'all'),
        move=opts.get(b'move'),
        exact=opts.get(b'exact'),
        nobackup=opts.get(b'no_backup'),
        keepchanges=opts.get(b'keep_changes'),
    )
    return ret


@command(
    b"qpop",
    [
        (b'a', b'all', None, _(b'pop all patches')),
        (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'forget any local changes to patched files')),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True,
)
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    if opts.get(b'name'):
        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
        ui.warn(_(b'using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(
        repo,
        patch,
        force=opts.get(b'force'),
        update=localupdate,
        all=opts.get(b'all'),
        nobackup=opts.get(b'no_backup'),
        keepchanges=opts.get(b'keep_changes'),
    )
    q.savedirty()
    return ret


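# Illustrative qrename usage: `hg qrename old.patch new.patch` renames a
# named patch, while `hg qmv new-name` renames the currently applied one
# (names are examples).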
@command(
    b"qrename|qmv",
    [],
    _(b'hg qrename PATCH1 [PATCH2]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_(b'no patches applied\n'))
            return
        patch = q.lookup(b'qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_(b'renaming %s to %s\n') % (patch, name))
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + b''.join([b' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        with r.wlock():
            if r.dirstate[patch] == b'a':
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                wctx.copy(patch, name)
                wctx.forget([patch])

    q.savedirty()


@command(
    b"qrestore",
    [
        (b'd', b'delete', None, _(b'delete save entry')),
        (b'u', b'update', None, _(b'update queue working directory')),
    ],
    _(b'hg qrestore [-d] [-u] REV'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    rev = repo.lookup(rev)
    q = repo.mq
    q.restore(
        repo, rev, delete=opts.get(r'delete'), qupdate=opts.get(r'update')
    )
    q.savedirty()
    return 0


@command(
    b"qsave",
    [
        (b'c', b'copy', None, _(b'copy patch directory')),
        (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
        (b'e', b'empty', None, _(b'clear queue status file')),
        (b'f', b'force', None, _(b'force copy')),
    ]
    + cmdutil.commitopts,
    _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty()  # save to .hg/patches before copying
    if opts.get(b'copy'):
        path = q.path
        if opts.get(b'name'):
            newpath = os.path.join(q.basepath, opts.get(b'name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise error.Abort(
                        _(b'destination %s exists and is not ' b'a directory')
                        % newpath
                    )
                if not opts.get(b'force'):
                    raise error.Abort(
                        _(b'destination %s exists, ' b'use -f to force')
                        % newpath
                    )
        else:
            newpath = savename(path)
        ui.warn(_(b"copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get(b'empty'):
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0


3689 @command(
3692 @command(
3690 b"qselect",
3693 b"qselect",
3691 [
3694 [
3692 (b'n', b'none', None, _(b'disable all guards')),
3695 (b'n', b'none', None, _(b'disable all guards')),
3693 (b's', b'series', None, _(b'list all guards in series file')),
3696 (b's', b'series', None, _(b'list all guards in series file')),
3694 (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
3697 (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
3695 (b'', b'reapply', None, _(b'pop, then reapply patches')),
3698 (b'', b'reapply', None, _(b'pop, then reapply patches')),
3696 ],
3699 ],
3697 _(b'hg qselect [OPTION]... [GUARD]...'),
3700 _(b'hg qselect [OPTION]... [GUARD]...'),
3698 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3701 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3699 )
3702 )
3700 def select(ui, repo, *args, **opts):
3703 def select(ui, repo, *args, **opts):
3701 '''set or print guarded patches to push
3704 '''set or print guarded patches to push
3702
3705
3703 Use the :hg:`qguard` command to set or print guards on patch, then use
3706 Use the :hg:`qguard` command to set or print guards on patch, then use
3704 qselect to tell mq which guards to use. A patch will be pushed if
3707 qselect to tell mq which guards to use. A patch will be pushed if
3705 it has no guards or any positive guards match the currently
3708 it has no guards or any positive guards match the currently
3706 selected guard, but will not be pushed if any negative guards
3709 selected guard, but will not be pushed if any negative guards
3707 match the current guard. For example::
3710 match the current guard. For example::
3708
3711
3709 qguard foo.patch -- -stable (negative guard)
3712 qguard foo.patch -- -stable (negative guard)
3710 qguard bar.patch +stable (positive guard)
3713 qguard bar.patch +stable (positive guard)
3711 qselect stable
3714 qselect stable
3712
3715
3713 This activates the "stable" guard. mq will skip foo.patch (because
3716 This activates the "stable" guard. mq will skip foo.patch (because
3714 it has a negative match) but push bar.patch (because it has a
3717 it has a negative match) but push bar.patch (because it has a
3715 positive match).
3718 positive match).
3716
3719
3717 With no arguments, prints the currently active guards.
3720 With no arguments, prints the currently active guards.
3718 With one argument, sets the active guard.
3721 With one argument, sets the active guard.
3719
3722
3720 Use -n/--none to deactivate guards (no other arguments needed).
3723 Use -n/--none to deactivate guards (no other arguments needed).
3721 When no guards are active, patches with positive guards are
3724 When no guards are active, patches with positive guards are
3722 skipped and patches with negative guards are pushed.
3725 skipped and patches with negative guards are pushed.
3723
3726
3724 qselect can change the guards on applied patches. It does not pop
3727 qselect can change the guards on applied patches. It does not pop
3725 guarded patches by default. Use --pop to pop back to the last
3728 guarded patches by default. Use --pop to pop back to the last
3726 applied patch that is not guarded. Use --reapply (which implies
3729 applied patch that is not guarded. Use --reapply (which implies
3727 --pop) to push back to the current patch afterwards, but skip
3730 --pop) to push back to the current patch afterwards, but skip
3728 guarded patches.
3731 guarded patches.
3729
3732
3730 Use -s/--series to print a list of all guards in the series file
3733 Use -s/--series to print a list of all guards in the series file
3731 (no other arguments needed). Use -v for more information.
3734 (no other arguments needed). Use -v for more information.
3732
3735
3733 Returns 0 on success.'''
3736 Returns 0 on success.'''
3734
3737
3735 q = repo.mq
3738 q = repo.mq
3736 opts = pycompat.byteskwargs(opts)
3739 opts = pycompat.byteskwargs(opts)
3737 guards = q.active()
3740 guards = q.active()
3738 pushable = lambda i: q.pushable(q.applied[i].name)[0]
3741 pushable = lambda i: q.pushable(q.applied[i].name)[0]
3739 if args or opts.get(b'none'):
3742 if args or opts.get(b'none'):
3740 old_unapplied = q.unapplied(repo)
3743 old_unapplied = q.unapplied(repo)
3741 old_guarded = [
3744 old_guarded = [
3742 i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
3745 i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
3743 ]
3746 ]
3744 q.setactive(args)
3747 q.setactive(args)
3745 q.savedirty()
3748 q.savedirty()
3746 if not args:
3749 if not args:
3747 ui.status(_(b'guards deactivated\n'))
3750 ui.status(_(b'guards deactivated\n'))
3748 if not opts.get(b'pop') and not opts.get(b'reapply'):
3751 if not opts.get(b'pop') and not opts.get(b'reapply'):
3749 unapplied = q.unapplied(repo)
3752 unapplied = q.unapplied(repo)
3750 guarded = [
3753 guarded = [
3751 i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
3754 i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
3752 ]
3755 ]
3753 if len(unapplied) != len(old_unapplied):
3756 if len(unapplied) != len(old_unapplied):
3754 ui.status(
3757 ui.status(
3755 _(
3758 _(
3756 b'number of unguarded, unapplied patches has '
3759 b'number of unguarded, unapplied patches has '
3757 b'changed from %d to %d\n'
3760 b'changed from %d to %d\n'
3758 )
3761 )
3759 % (len(old_unapplied), len(unapplied))
3762 % (len(old_unapplied), len(unapplied))
3760 )
3763 )
3761 if len(guarded) != len(old_guarded):
3764 if len(guarded) != len(old_guarded):
3762 ui.status(
3765 ui.status(
3763 _(
3766 _(
3764 b'number of guarded, applied patches has changed '
3767 b'number of guarded, applied patches has changed '
3765 b'from %d to %d\n'
3768 b'from %d to %d\n'
3766 )
3769 )
3767 % (len(old_guarded), len(guarded))
3770 % (len(old_guarded), len(guarded))
3768 )
3771 )
3769 elif opts.get(b'series'):
3772 elif opts.get(b'series'):
3770 guards = {}
3773 guards = {}
3771 noguards = 0
3774 noguards = 0
3772 for gs in q.seriesguards:
3775 for gs in q.seriesguards:
3773 if not gs:
3776 if not gs:
3774 noguards += 1
3777 noguards += 1
3775 for g in gs:
3778 for g in gs:
3776 guards.setdefault(g, 0)
3779 guards.setdefault(g, 0)
3777 guards[g] += 1
3780 guards[g] += 1
3778 if ui.verbose:
3781 if ui.verbose:
3779 guards[b'NONE'] = noguards
3782 guards[b'NONE'] = noguards
3780 guards = list(guards.items())
3783 guards = list(guards.items())
3781 guards.sort(key=lambda x: x[0][1:])
3784 guards.sort(key=lambda x: x[0][1:])
3782 if guards:
3785 if guards:
3783 ui.note(_(b'guards in series file:\n'))
3786 ui.note(_(b'guards in series file:\n'))
3784 for guard, count in guards:
3787 for guard, count in guards:
3785 ui.note(b'%2d ' % count)
3788 ui.note(b'%2d ' % count)
3786 ui.write(guard, b'\n')
3789 ui.write(guard, b'\n')
3787 else:
3790 else:
3788 ui.note(_(b'no guards in series file\n'))
3791 ui.note(_(b'no guards in series file\n'))
3789 else:
3792 else:
3790 if guards:
3793 if guards:
3791 ui.note(_(b'active guards:\n'))
3794 ui.note(_(b'active guards:\n'))
3792 for g in guards:
3795 for g in guards:
3793 ui.write(g, b'\n')
3796 ui.write(g, b'\n')
3794 else:
3797 else:
3795 ui.write(_(b'no active guards\n'))
3798 ui.write(_(b'no active guards\n'))
3796 reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
3799 reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
3797 popped = False
3800 popped = False
3798 if opts.get(b'pop') or opts.get(b'reapply'):
3801 if opts.get(b'pop') or opts.get(b'reapply'):
3799 for i in pycompat.xrange(len(q.applied)):
3802 for i in pycompat.xrange(len(q.applied)):
3800 if not pushable(i):
3803 if not pushable(i):
3801 ui.status(_(b'popping guarded patches\n'))
3804 ui.status(_(b'popping guarded patches\n'))
3802 popped = True
3805 popped = True
3803 if i == 0:
3806 if i == 0:
3804 q.pop(repo, all=True)
3807 q.pop(repo, all=True)
3805 else:
3808 else:
3806 q.pop(repo, q.applied[i - 1].name)
3809 q.pop(repo, q.applied[i - 1].name)
3807 break
3810 break
3808 if popped:
3811 if popped:
3809 try:
3812 try:
3810 if reapply:
3813 if reapply:
3811 ui.status(_(b'reapplying unguarded patches\n'))
3814 ui.status(_(b'reapplying unguarded patches\n'))
3812 q.push(repo, reapply)
3815 q.push(repo, reapply)
3813 finally:
3816 finally:
3814 q.savedirty()
3817 q.savedirty()
3815
3818
3816
3819
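# Illustrative sketch (not part of mq): a standalone model of the guard rule
# described in the qselect docstring above. The helper name and simplified
# semantics are assumptions for illustration only; the authoritative logic
# lives in queue.pushable().
def _guarddemo():
    def pushable(guards, active):
        if not guards:
            return True  # unguarded patches are always pushable
        # any matching negative guard blocks the push
        if any(g[1:] in active for g in guards if g.startswith(b'-')):
            return False
        positives = [g[1:] for g in guards if g.startswith(b'+')]
        # with positive guards, at least one must match an active guard
        return any(g in active for g in positives) if positives else True

    active = {b'stable'}
    assert not pushable([b'-stable'], active)  # foo.patch is skipped
    assert pushable([b'+stable'], active)  # bar.patch is pushed

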
@command(
    b"qfinish",
    [(b'a', b'applied', None, _(b'finish all applied changesets'))],
    _(b'hg qfinish [-a] [REV]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    if not opts.get(r'applied') and not revrange:
        raise error.Abort(_(b'no revisions specified'))
    elif opts.get(r'applied'):
        revrange = (b'qbase::qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_(b'no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    if repo[b'.'].rev() in revs and repo[None].files():
        ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases but leaves the responsibility to lock
    # the repo to the caller to avoid deadlock with wlock. This command code
    # is responsible for this locking.
    with repo.lock():
        q.finish(repo, revs)
        q.savedirty()
    return 0


@command(
    b"qqueue",
    [
        (b'l', b'list', False, _(b'list all available queues')),
        (b'', b'active', False, _(b'print name of active queue')),
        (b'c', b'create', False, _(b'create new queue')),
        (b'', b'rename', False, _(b'rename active queue')),
        (b'', b'delete', False, _(b'delete reference to queue')),
        (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
    ],
    _(b'[OPTION] [QUEUE]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''
    q = repo.mq
    _defaultqueue = b'patches'
    _allqueues = b'patches.queues'
    _activequeue = b'patches.queue'

    def _getcurrent():
        cur = os.path.basename(q.path)
        if cur.startswith(b'patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        try:
            fh = repo.vfs(_allqueues, b'r')
            fh.close()
        except IOError:
            return True

        return False

    def _getqueues():
        current = _getcurrent()

        try:
            fh = repo.vfs(_allqueues, b'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    def _setactive(name):
        if q.applied:
            raise error.Abort(
                _(
                    b'new queue created, but cannot make active '
                    b'as patches are applied'
                )
            )
        _setactivenocheck(name)

    def _setactivenocheck(name):
        fh = repo.vfs(_activequeue, b'w')
        if name != b'patches':
            fh.write(name)
        fh.close()

    def _addqueue(name):
        fh = repo.vfs(_allqueues, b'a')
        fh.write(b'%s\n' % (name,))
        fh.close()

    def _queuedir(name):
        if name == b'patches':
            return repo.vfs.join(b'patches')
        else:
            return repo.vfs.join(b'patches-' + name)

    def _validname(name):
        for n in name:
            if n in b':\\/.':
                return False
        return True

    def _delete(name):
        if name not in existing:
            raise error.Abort(_(b'cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise error.Abort(_(b'cannot delete currently active queue'))

        fh = repo.vfs(b'patches.queues.new', b'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write(b'%s\n' % (queue,))
        fh.close()
        repo.vfs.rename(b'patches.queues.new', _allqueues)

    opts = pycompat.byteskwargs(opts)
    if not name or opts.get(b'list') or opts.get(b'active'):
        current = _getcurrent()
        if opts.get(b'active'):
            ui.write(b'%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write(b'%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(b' (active)\n'))
            else:
                ui.write(b'\n')
        return

    if not _validname(name):
        raise error.Abort(
            _(b'invalid queue name, may not contain the characters ":\\/."')
        )

    with repo.wlock():
        existing = _getqueues()

        if opts.get(b'create'):
            if name in existing:
                raise error.Abort(_(b'queue "%s" already exists') % name)
            if _noqueues():
                _addqueue(_defaultqueue)
            _addqueue(name)
            _setactive(name)
        elif opts.get(b'rename'):
            current = _getcurrent()
            if name == current:
                raise error.Abort(
                    _(b'can\'t rename "%s" to its current name') % name
                )
            if name in existing:
                raise error.Abort(_(b'queue "%s" already exists') % name)

            olddir = _queuedir(current)
            newdir = _queuedir(name)

            if os.path.exists(newdir):
                raise error.Abort(
                    _(b'non-queue directory "%s" already exists') % newdir
                )

            fh = repo.vfs(b'patches.queues.new', b'w')
            for queue in existing:
                if queue == current:
                    fh.write(b'%s\n' % (name,))
                    if os.path.exists(olddir):
                        util.rename(olddir, newdir)
                else:
                    fh.write(b'%s\n' % (queue,))
            fh.close()
            repo.vfs.rename(b'patches.queues.new', _allqueues)
            _setactivenocheck(name)
        elif opts.get(b'delete'):
            _delete(name)
        elif opts.get(b'purge'):
            if name in existing:
                _delete(name)
            qdir = _queuedir(name)
            if os.path.exists(qdir):
                shutil.rmtree(qdir)
        else:
            if name not in existing:
                raise error.Abort(_(b'use --create to create a new queue'))
            _setactive(name)


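# Illustrative sketch (not part of mq): the on-disk registry assumed by the
# helpers above, under .hg/ -- "patches.queues" lists one queue name per
# line, "patches.queue" names the active queue (empty means the default
# "patches"), and a queue "foo" keeps its patches in "patches-foo". The demo
# function and its paths are hypothetical.
def _queuesdemo(hgdir):
    import os

    with open(os.path.join(hgdir, 'patches.queues'), 'wb') as fh:
        fh.write(b'patches\nfeature-x\n')
    with open(os.path.join(hgdir, 'patches.queues'), 'rb') as fh:
        names = sorted(l.strip() for l in fh if l.strip())
    assert names == [b'feature-x', b'patches']

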
def mqphasedefaults(repo, roots):
    """callback used to set mq changeset as secret when no phase data exists"""
    if repo.mq.applied:
        if repo.ui.configbool(b'mq', b'secret'):
            mqphase = phases.secret
        else:
            mqphase = phases.draft
        qbase = repo[repo.mq.applied[0].node]
        roots[mqphase].add(qbase.node())
    return roots


def reposetup(ui, repo):
    class mqrepo(repo.__class__):
        @localrepo.unfilteredpropertycache
        def mq(self):
            return queue(self.ui, self.baseui, self.path)

        def invalidateall(self):
            super(mqrepo, self).invalidateall()
            if localrepo.hasunfilteredcache(self, r'mq'):
                # recreate mq in case queue path was changed
                delattr(self.unfiltered(), r'mq')

        def abortifwdirpatched(self, errmsg, force=False):
            if self.mq.applied and self.mq.checkapplied and not force:
                parents = self.dirstate.parents()
                patches = [s.node for s in self.mq.applied]
                if any(p in patches for p in parents):
                    raise error.Abort(errmsg)

        def commit(
            self,
            text=b"",
            user=None,
            date=None,
            match=None,
            force=False,
            editor=False,
            extra=None,
        ):
            if extra is None:
                extra = {}
            self.abortifwdirpatched(
                _(b'cannot commit over an applied mq patch'), force
            )

            return super(mqrepo, self).commit(
                text, user, date, match, force, editor, extra
            )

        def checkpush(self, pushop):
            if self.mq.applied and self.mq.checkapplied and not pushop.force:
                outapplied = [e.node for e in self.mq.applied]
                if pushop.revs:
                    # Assume applied patches have no non-patch descendants and
                    # are not on remote already. Filtering any changeset not
                    # pushed.
                    heads = set(pushop.revs)
                    for node in reversed(outapplied):
                        if node in heads:
                            break
                        else:
                            outapplied.pop()
                # looking for pushed and shared changeset
                for node in outapplied:
                    if self[node].phase() < phases.secret:
                        raise error.Abort(_(b'source has mq patches applied'))
                # no non-secret patches pushed
            super(mqrepo, self).checkpush(pushop)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(patch.node, patch.name) for patch in q.applied]

            try:
                # for now ignore filtering business
                self.unfiltered().changelog.rev(mqtags[-1][0])
            except error.LookupError:
                self.ui.warn(
                    _(b'mq status file refers to unknown node %s\n')
                    % short(mqtags[-1][0])
                )
                return result

            # do not add fake tags for filtered revisions
            included = self.changelog.hasnode
            mqtags = [mqt for mqt in mqtags if included(mqt[0])]
            if not mqtags:
                return result

            mqtags.append((mqtags[-1][0], b'qtip'))
            mqtags.append((mqtags[0][0], b'qbase'))
            mqtags.append(
                (self.changelog.parents(mqtags[0][0])[0], b'qparent')
            )
            tags = result[0]
            for patch in mqtags:
                if patch[1] in tags:
                    self.ui.warn(
                        _(b'tag %s overrides mq patch of the same name\n')
                        % patch[1]
                    )
                else:
                    tags[patch[1]] = patch[0]

            return result

    if repo.local():
        repo.__class__ = mqrepo

        repo._phasedefaults.append(mqphasedefaults)


def mqimport(orig, ui, repo, *args, **kwargs):
    if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
        r'no_commit', False
    ):
        repo.abortifwdirpatched(
            _(b'cannot import over an applied patch'), kwargs.get(r'force')
        )
    return orig(ui, repo, *args, **kwargs)


def mqinit(orig, ui, *args, **kwargs):
    mq = kwargs.pop(r'mq', None)

    if not mq:
        return orig(ui, *args, **kwargs)

    if args:
        repopath = args[0]
        if not hg.islocal(repopath):
            raise error.Abort(
                _(b'only a local queue repository may be initialized')
            )
    else:
        repopath = cmdutil.findrepo(encoding.getcwd())
        if not repopath:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
    repo = hg.repository(ui, repopath)
    return qinit(ui, repo, True)


def mqcommand(orig, ui, repo, *args, **kwargs):
    """Add --mq option to operate on patch repository instead of main"""

    # some commands do not like getting unknown options
    mq = kwargs.pop(r'mq', None)

    if not mq:
        return orig(ui, repo, *args, **kwargs)

    q = repo.mq
    r = q.qrepo()
    if not r:
        raise error.Abort(_(b'no queue repository'))
    return orig(r.ui, r, *args, **kwargs)


def summaryhook(ui, repo):
    q = repo.mq
    m = []
    a, u = len(q.applied), len(q.unapplied(repo))
    if a:
        m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
    if u:
        m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
    if m:
        # i18n: column positioning for "hg summary"
        ui.write(_(b"mq: %s\n") % b', '.join(m))
    else:
        # i18n: column positioning for "hg summary"
        ui.note(_(b"mq: (empty queue)\n"))


revsetpredicate = registrar.revsetpredicate()


@revsetpredicate(b'mq()')
def revsetmq(repo, subset, x):
    """Changesets managed by MQ."""
    revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
    applied = {repo[r.node].rev() for r in repo.mq.applied}
    return smartset.baseset([r for r in subset if r in applied])


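# Illustrative usage (assumption, shown for context): with the predicate
# above registered, `hg log -r "mq()"` limits output to changesets that are
# currently under mq control.

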
# tell hggettext to extract docstrings from these functions:
i18nfunctions = [revsetmq]


def extsetup(ui):
    # Ensure mq wrappers are called first, regardless of extension load order
    # by NOT wrapping in uisetup() and instead deferring to init stage two
    # here.
    mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]

    extensions.wrapcommand(commands.table, b'import', mqimport)
    cmdutil.summaryhooks.add(b'mq', summaryhook)

    entry = extensions.wrapcommand(commands.table, b'init', mqinit)
    entry[1].extend(mqopt)

    def dotable(cmdtable):
        for cmd, entry in cmdtable.iteritems():
            cmd = cmdutil.parsealiases(cmd)[0]
            func = entry[0]
            if func.norepo:
                continue
            entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
            entry[1].extend(mqopt)

    dotable(commands.table)

    for extname, extmodule in extensions.extensions():
        if extmodule.__file__ != __file__:
            dotable(getattr(extmodule, 'cmdtable', {}))


colortable = {
    b'qguard.negative': b'red',
    b'qguard.positive': b'yellow',
    b'qguard.unguarded': b'green',
    b'qseries.applied': b'blue bold underline',
    b'qseries.guarded': b'black bold',
    b'qseries.missing': b'red bold',
    b'qseries.unapplied': b'black bold',
}
@@ -1,1256 +1,1257 @@
# phabricator.py - simple Phabricator integration
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""simple Phabricator integration (EXPERIMENTAL)

This extension provides a ``phabsend`` command which sends a stack of
changesets to Phabricator, a ``phabread`` command which prints a stack of
revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
to update statuses in batch.

By default, Phabricator requires a ``Test Plan`` which might prevent some
changesets from being sent. The requirement can be disabled by changing the
``differential.require-test-plan-field`` config server side.

Config::

    [phabricator]
    # Phabricator URL
    url = https://phab.example.com/

    # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
    # callsign is "FOO".
    callsign = FOO

    # curl command to use. If not set (default), use builtin HTTP library to
    # communicate. If set, use the specified curl command. This could be
    # useful if you need to specify advanced options that are not easily
    # supported by the internal library.
    curlcmd = curl --connect-timeout 2 --retry 3 --silent

    [auth]
    example.schemes = https
    example.prefix = phab.example.com

    # API token. Get it from https://$HOST/conduit/login/
    example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
"""

from __future__ import absolute_import

import contextlib
import itertools
import json
import operator
import re

from mercurial.node import bin, nullid
from mercurial.i18n import _
from mercurial.pycompat import getattr
from mercurial import (
    cmdutil,
    context,
    encoding,
    error,
    exthelper,
    httpconnection as httpconnectionmod,
    mdiff,
    obsutil,
    parser,
    patch,
    phases,
    pycompat,
    scmutil,
    smartset,
    tags,
    templatefilters,
    templateutil,
    url as urlmod,
    util,
)
from mercurial.utils import (
    procutil,
    stringutil,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core'
# for extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

eh = exthelper.exthelper()

cmdtable = eh.cmdtable
command = eh.command
configtable = eh.configtable
templatekeyword = eh.templatekeyword

# developer config: phabricator.batchsize
eh.configitem(
    b'phabricator', b'batchsize', default=12,
)
eh.configitem(
    b'phabricator', b'callsign', default=None,
)
eh.configitem(
    b'phabricator', b'curlcmd', default=None,
)
# developer config: phabricator.repophid
eh.configitem(
    b'phabricator', b'repophid', default=None,
)
eh.configitem(
    b'phabricator', b'url', default=None,
)
eh.configitem(
    b'phabsend', b'confirm', default=False,
)

colortable = {
    b'phabricator.action.created': b'green',
    b'phabricator.action.skipped': b'magenta',
    b'phabricator.action.updated': b'magenta',
    b'phabricator.desc': b'',
    b'phabricator.drev': b'bold',
    b'phabricator.node': b'',
}

_VCR_FLAGS = [
    (
        b'',
        b'test-vcr',
        b'',
        _(
            b'Path to a vcr file. If nonexistent, will record a new vcr '
            b'transcript, otherwise will mock all http requests using the '
            b'specified vcr file. (ADVANCED)'
        ),
    ),
]


def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
    fullflags = flags + _VCR_FLAGS

    def hgmatcher(r1, r2):
        if r1.uri != r2.uri or r1.method != r2.method:
            return False
        r1params = r1.body.split(b'&')
        r2params = r2.body.split(b'&')
        return set(r1params) == set(r2params)

    def sanitiserequest(request):
        request.body = re.sub(
            br'cli-[a-z0-9]+', br'cli-hahayouwish', request.body
        )
        return request

    def sanitiseresponse(response):
        if r'set-cookie' in response[r'headers']:
            del response[r'headers'][r'set-cookie']
        return response

    def decorate(fn):
        def inner(*args, **kwargs):
            cassette = pycompat.fsdecode(kwargs.pop(r'test_vcr', None))
            if cassette:
                import hgdemandimport

                with hgdemandimport.deactivated():
                    import vcr as vcrmod
                    import vcr.stubs as stubs

                    vcr = vcrmod.VCR(
                        serializer=r'json',
                        before_record_request=sanitiserequest,
                        before_record_response=sanitiseresponse,
                        custom_patches=[
                            (
                                urlmod,
                                r'httpconnection',
                                stubs.VCRHTTPConnection,
                            ),
                            (
                                urlmod,
                                r'httpsconnection',
                                stubs.VCRHTTPSConnection,
                            ),
                        ],
                    )
                    vcr.register_matcher(r'hgmatcher', hgmatcher)
                    with vcr.use_cassette(cassette, match_on=[r'hgmatcher']):
                        return fn(*args, **kwargs)
            return fn(*args, **kwargs)

        inner.__name__ = fn.__name__
        inner.__doc__ = fn.__doc__
        return command(
            name,
            fullflags,
            spec,
            helpcategory=helpcategory,
            optionalrepo=optionalrepo,
        )(inner)

    return decorate


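# Illustrative sketch (not part of the extension): hgmatcher above treats two
# recorded requests as equal when their form-encoded bodies carry the same
# parameters in any order. A standalone model of that comparison (names are
# hypothetical):
def _matcherdemo():
    def samebody(b1, b2):
        # order of '&'-separated pairs is irrelevant; duplicates collapse
        return set(b1.split(b'&')) == set(b2.split(b'&'))

    assert samebody(b'a=1&b=2', b'b=2&a=1')
    assert not samebody(b'a=1&b=2', b'a=1&b=3')

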
def urlencodenested(params):
    """like urlencode, but works with nested parameters.

    For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
    flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
    urlencode. Note: the encoding is consistent with PHP's http_build_query.
    """
    flatparams = util.sortdict()

    def process(prefix, obj):
        if isinstance(obj, bool):
            obj = {True: b'true', False: b'false'}[obj]  # Python -> PHP form
        lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
        items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
        if items is None:
            flatparams[prefix] = obj
        else:
            for k, v in items(obj):
                if prefix:
                    process(b'%s[%s]' % (prefix, k), v)
                else:
                    process(k, v)

    process(b'', params)
    return util.urlreq.urlencode(flatparams)


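# Illustrative sketch (not part of the extension): the flattening performed
# by urlencodenested above, modelled without Mercurial's util helpers. The
# function name is an assumption for illustration only.
def _flattendemo(params):
    flat = {}

    def process(prefix, obj):
        if isinstance(obj, list):
            items = [(b'%d' % i, v) for i, v in enumerate(obj)]
        elif isinstance(obj, dict):
            items = list(obj.items())
        else:
            flat[prefix] = obj
            return
        for k, v in items:
            process(b'%s[%s]' % (prefix, k) if prefix else k, v)

    process(b'', params)
    return flat


# {'a': ['b', 'c'], 'd': {'e': 'f'}} -> {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'}
assert _flattendemo({b'a': [b'b', b'c'], b'd': {b'e': b'f'}}) == {
    b'a[0]': b'b',
    b'a[1]': b'c',
    b'd[e]': b'f',
}

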
def readurltoken(ui):
    """return conduit url, token and make sure they exist

    Currently read from [auth] config section. In the future, it might
    make sense to read from .arcconfig and .arcrc as well.
    """
    url = ui.config(b'phabricator', b'url')
    if not url:
        raise error.Abort(
            _(b'config %s.%s is required') % (b'phabricator', b'url')
        )

    res = httpconnectionmod.readauthforuri(ui, url, util.url(url).user)
    token = None

    if res:
        group, auth = res

        ui.debug(b"using auth.%s.* for authentication\n" % group)

        token = auth.get(b'phabtoken')

    if not token:
        raise error.Abort(
            _(b'Can\'t find conduit token associated to %s') % (url,)
        )

    return url, token


def callconduit(ui, name, params):
    """call Conduit API, params is a dict. return json.loads result, or None"""
    host, token = readurltoken(ui)
    url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
    ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
    params = params.copy()
    params[b'api.token'] = token
    data = urlencodenested(params)
    curlcmd = ui.config(b'phabricator', b'curlcmd')
    if curlcmd:
        sin, sout = procutil.popen2(
            b'%s -d @- %s' % (curlcmd, procutil.shellquote(url))
        )
        sin.write(data)
        sin.close()
        body = sout.read()
    else:
        urlopener = urlmod.opener(ui, authinfo)
        request = util.urlreq.request(pycompat.strurl(url), data=data)
        with contextlib.closing(urlopener.open(request)) as rsp:
            body = rsp.read()
    ui.debug(b'Conduit Response: %s\n' % body)
    parsed = pycompat.rapply(
        lambda x: encoding.unitolocal(x)
        if isinstance(x, pycompat.unicode)
        else x,
        # json.loads only accepts bytes from py3.6+
        json.loads(encoding.unifromlocal(body)),
    )
    if parsed.get(b'error_code'):
        msg = _(b'Conduit Error (%s): %s') % (
            parsed[b'error_code'],
            parsed[b'error_info'],
        )
        raise error.Abort(msg)
    return parsed[b'result']


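# Illustrative usage (assumption, shown for context): a minimal call through
# the helper above. 'conduit.ping' is a stock Conduit method; any error_code
# in the parsed response is raised as error.Abort by callconduit.
#
#     result = callconduit(ui, b'conduit.ping', {})

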
296 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
297 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
297 def debugcallconduit(ui, repo, name):
298 def debugcallconduit(ui, repo, name):
298 """call Conduit API
299 """call Conduit API
299
300
300 Call parameters are read from stdin as a JSON blob. Result will be written
301 Call parameters are read from stdin as a JSON blob. Result will be written
301 to stdout as a JSON blob.
302 to stdout as a JSON blob.
302 """
303 """
303 # json.loads only accepts bytes from 3.6+
304 # json.loads only accepts bytes from 3.6+
304 rawparams = encoding.unifromlocal(ui.fin.read())
305 rawparams = encoding.unifromlocal(ui.fin.read())
305 # json.loads only returns unicode strings
306 # json.loads only returns unicode strings
306 params = pycompat.rapply(
307 params = pycompat.rapply(
307 lambda x: encoding.unitolocal(x)
308 lambda x: encoding.unitolocal(x)
308 if isinstance(x, pycompat.unicode)
309 if isinstance(x, pycompat.unicode)
309 else x,
310 else x,
310 json.loads(rawparams),
311 json.loads(rawparams),
311 )
312 )
312 # json.dumps only accepts unicode strings
313 # json.dumps only accepts unicode strings
313 result = pycompat.rapply(
314 result = pycompat.rapply(
314 lambda x: encoding.unifromlocal(x) if isinstance(x, bytes) else x,
315 lambda x: encoding.unifromlocal(x) if isinstance(x, bytes) else x,
315 callconduit(ui, name, params),
316 callconduit(ui, name, params),
316 )
317 )
317 s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
318 s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
318 ui.write(b'%s\n' % encoding.unitolocal(s))
319 ui.write(b'%s\n' % encoding.unitolocal(s))
319
320
320
321
321 def getrepophid(repo):
322 def getrepophid(repo):
322 """given callsign, return repository PHID or None"""
323 """given callsign, return repository PHID or None"""
323 # developer config: phabricator.repophid
324 # developer config: phabricator.repophid
324 repophid = repo.ui.config(b'phabricator', b'repophid')
325 repophid = repo.ui.config(b'phabricator', b'repophid')
325 if repophid:
326 if repophid:
326 return repophid
327 return repophid
327 callsign = repo.ui.config(b'phabricator', b'callsign')
328 callsign = repo.ui.config(b'phabricator', b'callsign')
328 if not callsign:
329 if not callsign:
329 return None
330 return None
330 query = callconduit(
331 query = callconduit(
331 repo.ui,
332 repo.ui,
332 b'diffusion.repository.search',
333 b'diffusion.repository.search',
333 {b'constraints': {b'callsigns': [callsign]}},
334 {b'constraints': {b'callsigns': [callsign]}},
334 )
335 )
335 if len(query[b'data']) == 0:
336 if len(query[b'data']) == 0:
336 return None
337 return None
337 repophid = query[b'data'][0][b'phid']
338 repophid = query[b'data'][0][b'phid']
338 repo.ui.setconfig(b'phabricator', b'repophid', repophid)
339 repo.ui.setconfig(b'phabricator', b'repophid', repophid)
339 return repophid
340 return repophid
340
341
341
342
342 _differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
343 _differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
343 _differentialrevisiondescre = re.compile(
344 _differentialrevisiondescre = re.compile(
344 br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
345 br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
345 )
346 )
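# For example, a commit message line such as (URL is hypothetical)
#   Differential Revision: https://phab.example.com/D8387
# matches _differentialrevisiondescre with url
# 'https://phab.example.com/D8387' and id '8387', while a local tag like
# 'D8387' matches _differentialrevisiontagre.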
346
347
347
348
348 def getoldnodedrevmap(repo, nodelist):
349 def getoldnodedrevmap(repo, nodelist):
349 """find previous nodes that has been sent to Phabricator
350 """find previous nodes that has been sent to Phabricator
350
351
351 return {node: (oldnode, Differential diff, Differential Revision ID)}
352 return {node: (oldnode, Differential diff, Differential Revision ID)}
352 for node in nodelist with known previously sent versions, or associated
353 for node in nodelist with known previously sent versions, or associated
353 Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
354 Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
354 be ``None``.
355 be ``None``.
355
356
356 Examines commit messages like "Differential Revision:" to get the
357 Examines commit messages like "Differential Revision:" to get the
357 association information.
358 association information.
358
359
359 If no such commit message line is found, examine all precursors and their
360 If no such commit message line is found, examine all precursors and their
360 tags. Tags with a format like "D1234" are considered a match, and the node
361 tags. Tags with a format like "D1234" are considered a match, and the node
361 with that tag and the number after "D" (e.g. 1234) will be returned.
362 with that tag and the number after "D" (e.g. 1234) will be returned.
362
363
363 The ``old node``, if not None, is guaranteed to be the last diff of the
364 The ``old node``, if not None, is guaranteed to be the last diff of the
364 corresponding Differential Revision, and to exist in the repo.
365 corresponding Differential Revision, and to exist in the repo.
365 """
366 """
366 unfi = repo.unfiltered()
367 unfi = repo.unfiltered()
367 nodemap = unfi.changelog.nodemap
368 nodemap = unfi.changelog.nodemap
368
369
369 result = {} # {node: (oldnode?, lastdiff?, drev)}
370 result = {} # {node: (oldnode?, lastdiff?, drev)}
370 toconfirm = {} # {node: (force, {precnode}, drev)}
371 toconfirm = {} # {node: (force, {precnode}, drev)}
371 for node in nodelist:
372 for node in nodelist:
372 ctx = unfi[node]
373 ctx = unfi[node]
373 # For tags like "D123", put them into "toconfirm" to verify later
374 # For tags like "D123", put them into "toconfirm" to verify later
374 precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
375 precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
375 for n in precnodes:
376 for n in precnodes:
376 if n in nodemap:
377 if n in nodemap:
377 for tag in unfi.nodetags(n):
378 for tag in unfi.nodetags(n):
378 m = _differentialrevisiontagre.match(tag)
379 m = _differentialrevisiontagre.match(tag)
379 if m:
380 if m:
380 toconfirm[node] = (0, set(precnodes), int(m.group(1)))
381 toconfirm[node] = (0, set(precnodes), int(m.group(1)))
381 continue
382 continue
382
383
383 # Check commit message
384 # Check commit message
384 m = _differentialrevisiondescre.search(ctx.description())
385 m = _differentialrevisiondescre.search(ctx.description())
385 if m:
386 if m:
386 toconfirm[node] = (1, set(precnodes), int(m.group(r'id')))
387 toconfirm[node] = (1, set(precnodes), int(m.group(r'id')))
387
388
388 # Double-check that tags are genuine by collecting all old nodes from
389 # Double-check that tags are genuine by collecting all old nodes from
389 # Phabricator, and expecting the local precursors to overlap with them.
390 # Phabricator, and expecting the local precursors to overlap with them.
390 if toconfirm:
391 if toconfirm:
391 drevs = [drev for force, precs, drev in toconfirm.values()]
392 drevs = [drev for force, precs, drev in toconfirm.values()]
392 alldiffs = callconduit(
393 alldiffs = callconduit(
393 unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs}
394 unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs}
394 )
395 )
395 getnode = lambda d: bin(getdiffmeta(d).get(b'node', b'')) or None
396 getnode = lambda d: bin(getdiffmeta(d).get(b'node', b'')) or None
396 for newnode, (force, precset, drev) in toconfirm.items():
397 for newnode, (force, precset, drev) in toconfirm.items():
397 diffs = [
398 diffs = [
398 d for d in alldiffs.values() if int(d[b'revisionID']) == drev
399 d for d in alldiffs.values() if int(d[b'revisionID']) == drev
399 ]
400 ]
400
401
401 # "precursors" as known by Phabricator
402 # "precursors" as known by Phabricator
402 phprecset = set(getnode(d) for d in diffs)
403 phprecset = set(getnode(d) for d in diffs)
403
404
404 # Ignore if precursors (Phabricator and local repo) do not overlap,
405 # Ignore if precursors (Phabricator and local repo) do not overlap,
405 # and force is not set (when commit message says nothing)
406 # and force is not set (when commit message says nothing)
406 if not force and not bool(phprecset & precset):
407 if not force and not bool(phprecset & precset):
407 tagname = b'D%d' % drev
408 tagname = b'D%d' % drev
408 tags.tag(
409 tags.tag(
409 repo,
410 repo,
410 tagname,
411 tagname,
411 nullid,
412 nullid,
412 message=None,
413 message=None,
413 user=None,
414 user=None,
414 date=None,
415 date=None,
415 local=True,
416 local=True,
416 )
417 )
417 unfi.ui.warn(
418 unfi.ui.warn(
418 _(
419 _(
419 b'D%s: local tag removed - does not match '
420 b'D%s: local tag removed - does not match '
420 b'Differential history\n'
421 b'Differential history\n'
421 )
422 )
422 % drev
423 % drev
423 )
424 )
424 continue
425 continue
425
426
426 # Find the last node using Phabricator metadata, and make sure it
427 # Find the last node using Phabricator metadata, and make sure it
427 # exists in the repo
428 # exists in the repo
428 oldnode = lastdiff = None
429 oldnode = lastdiff = None
429 if diffs:
430 if diffs:
430 lastdiff = max(diffs, key=lambda d: int(d[b'id']))
431 lastdiff = max(diffs, key=lambda d: int(d[b'id']))
431 oldnode = getnode(lastdiff)
432 oldnode = getnode(lastdiff)
432 if oldnode and oldnode not in nodemap:
433 if oldnode and oldnode not in nodemap:
433 oldnode = None
434 oldnode = None
434
435
435 result[newnode] = (oldnode, lastdiff, drev)
436 result[newnode] = (oldnode, lastdiff, drev)
436
437
437 return result
438 return result
438
439
439
440
440 def getdiff(ctx, diffopts):
441 def getdiff(ctx, diffopts):
441 """plain-text diff without header (user, commit message, etc)"""
442 """plain-text diff without header (user, commit message, etc)"""
442 output = util.stringio()
443 output = util.stringio()
443 for chunk, _label in patch.diffui(
444 for chunk, _label in patch.diffui(
444 ctx.repo(), ctx.p1().node(), ctx.node(), None, opts=diffopts
445 ctx.repo(), ctx.p1().node(), ctx.node(), None, opts=diffopts
445 ):
446 ):
446 output.write(chunk)
447 output.write(chunk)
447 return output.getvalue()
448 return output.getvalue()
448
449
449
450
450 def creatediff(ctx):
451 def creatediff(ctx):
451 """create a Differential Diff"""
452 """create a Differential Diff"""
452 repo = ctx.repo()
453 repo = ctx.repo()
453 repophid = getrepophid(repo)
454 repophid = getrepophid(repo)
454 # Create a "Differential Diff" via "differential.createrawdiff" API
455 # Create a "Differential Diff" via "differential.createrawdiff" API
455 params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
456 params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
456 if repophid:
457 if repophid:
457 params[b'repositoryPHID'] = repophid
458 params[b'repositoryPHID'] = repophid
458 diff = callconduit(repo.ui, b'differential.createrawdiff', params)
459 diff = callconduit(repo.ui, b'differential.createrawdiff', params)
459 if not diff:
460 if not diff:
460 raise error.Abort(_(b'cannot create diff for %s') % ctx)
461 raise error.Abort(_(b'cannot create diff for %s') % ctx)
461 return diff
462 return diff
462
463
463
464
464 def writediffproperties(ctx, diff):
465 def writediffproperties(ctx, diff):
465 """write metadata to diff so patches could be applied losslessly"""
466 """write metadata to diff so patches could be applied losslessly"""
466 params = {
467 params = {
467 b'diff_id': diff[b'id'],
468 b'diff_id': diff[b'id'],
468 b'name': b'hg:meta',
469 b'name': b'hg:meta',
469 b'data': templatefilters.json(
470 b'data': templatefilters.json(
470 {
471 {
471 b'user': ctx.user(),
472 b'user': ctx.user(),
472 b'date': b'%d %d' % ctx.date(),
473 b'date': b'%d %d' % ctx.date(),
473 b'branch': ctx.branch(),
474 b'branch': ctx.branch(),
474 b'node': ctx.hex(),
475 b'node': ctx.hex(),
475 b'parent': ctx.p1().hex(),
476 b'parent': ctx.p1().hex(),
476 }
477 }
477 ),
478 ),
478 }
479 }
479 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
480 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
480
481
481 params = {
482 params = {
482 b'diff_id': diff[b'id'],
483 b'diff_id': diff[b'id'],
483 b'name': b'local:commits',
484 b'name': b'local:commits',
484 b'data': templatefilters.json(
485 b'data': templatefilters.json(
485 {
486 {
486 ctx.hex(): {
487 ctx.hex(): {
487 b'author': stringutil.person(ctx.user()),
488 b'author': stringutil.person(ctx.user()),
488 b'authorEmail': stringutil.email(ctx.user()),
489 b'authorEmail': stringutil.email(ctx.user()),
489 b'time': int(ctx.date()[0]),
490 b'time': int(ctx.date()[0]),
490 b'commit': ctx.hex(),
491 b'commit': ctx.hex(),
491 b'parents': [ctx.p1().hex()],
492 b'parents': [ctx.p1().hex()],
492 b'branch': ctx.branch(),
493 b'branch': ctx.branch(),
493 },
494 },
494 }
495 }
495 ),
496 ),
496 }
497 }
497 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
498 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
498
499
499
500
500 def createdifferentialrevision(
501 def createdifferentialrevision(
501 ctx,
502 ctx,
502 revid=None,
503 revid=None,
503 parentrevphid=None,
504 parentrevphid=None,
504 oldnode=None,
505 oldnode=None,
505 olddiff=None,
506 olddiff=None,
506 actions=None,
507 actions=None,
507 comment=None,
508 comment=None,
508 ):
509 ):
509 """create or update a Differential Revision
510 """create or update a Differential Revision
510
511
511 If revid is None, create a new Differential Revision, otherwise update
512 If revid is None, create a new Differential Revision, otherwise update
512 revid. If parentrevphid is not None, set it as a dependency.
513 revid. If parentrevphid is not None, set it as a dependency.
513
514
514 If oldnode is not None, check if the patch content (without commit message
515 If oldnode is not None, check if the patch content (without commit message
515 and metadata) has changed before creating another diff.
516 and metadata) has changed before creating another diff.
516
517
517 If actions is not None, they will be appended to the transaction.
518 If actions is not None, they will be appended to the transaction.
518 """
519 """
519 repo = ctx.repo()
520 repo = ctx.repo()
520 if oldnode:
521 if oldnode:
521 diffopts = mdiff.diffopts(git=True, context=32767)
522 diffopts = mdiff.diffopts(git=True, context=32767)
522 oldctx = repo.unfiltered()[oldnode]
523 oldctx = repo.unfiltered()[oldnode]
523 neednewdiff = getdiff(ctx, diffopts) != getdiff(oldctx, diffopts)
524 neednewdiff = getdiff(ctx, diffopts) != getdiff(oldctx, diffopts)
524 else:
525 else:
525 neednewdiff = True
526 neednewdiff = True
526
527
527 transactions = []
528 transactions = []
528 if neednewdiff:
529 if neednewdiff:
529 diff = creatediff(ctx)
530 diff = creatediff(ctx)
530 transactions.append({b'type': b'update', b'value': diff[b'phid']})
531 transactions.append({b'type': b'update', b'value': diff[b'phid']})
531 if comment:
532 if comment:
532 transactions.append({b'type': b'comment', b'value': comment})
533 transactions.append({b'type': b'comment', b'value': comment})
533 else:
534 else:
534 # Even if we don't need to upload a new diff because the patch content
535 # Even if we don't need to upload a new diff because the patch content
535 # does not change, we might still need to update its metadata so
536 # does not change, we might still need to update its metadata so
536 # pushers know the correct node metadata.
537 # pushers know the correct node metadata.
537 assert olddiff
538 assert olddiff
538 diff = olddiff
539 diff = olddiff
539 writediffproperties(ctx, diff)
540 writediffproperties(ctx, diff)
540
541
541 # Set the parent Revision every time, so commit re-ordering is picked up
542 # Set the parent Revision every time, so commit re-ordering is picked up
542 if parentrevphid:
543 if parentrevphid:
543 transactions.append(
544 transactions.append(
544 {b'type': b'parents.set', b'value': [parentrevphid]}
545 {b'type': b'parents.set', b'value': [parentrevphid]}
545 )
546 )
546
547
547 if actions:
548 if actions:
548 transactions += actions
549 transactions += actions
549
550
550 # Parse commit message and update related fields.
551 # Parse commit message and update related fields.
551 desc = ctx.description()
552 desc = ctx.description()
552 info = callconduit(
553 info = callconduit(
553 repo.ui, b'differential.parsecommitmessage', {b'corpus': desc}
554 repo.ui, b'differential.parsecommitmessage', {b'corpus': desc}
554 )
555 )
555 for k, v in info[b'fields'].items():
556 for k, v in info[b'fields'].items():
556 if k in [b'title', b'summary', b'testPlan']:
557 if k in [b'title', b'summary', b'testPlan']:
557 transactions.append({b'type': k, b'value': v})
558 transactions.append({b'type': k, b'value': v})
558
559
559 params = {b'transactions': transactions}
560 params = {b'transactions': transactions}
560 if revid is not None:
561 if revid is not None:
561 # Update an existing Differential Revision
562 # Update an existing Differential Revision
562 params[b'objectIdentifier'] = revid
563 params[b'objectIdentifier'] = revid
563
564
564 revision = callconduit(repo.ui, b'differential.revision.edit', params)
565 revision = callconduit(repo.ui, b'differential.revision.edit', params)
565 if not revision:
566 if not revision:
566 raise error.Abort(_(b'cannot create revision for %s') % ctx)
567 raise error.Abort(_(b'cannot create revision for %s') % ctx)
567
568
568 return revision, diff
569 return revision, diff
569
570
570
571
571 def userphids(repo, names):
572 def userphids(repo, names):
572 """convert user names to PHIDs"""
573 """convert user names to PHIDs"""
573 names = [name.lower() for name in names]
574 names = [name.lower() for name in names]
574 query = {b'constraints': {b'usernames': names}}
575 query = {b'constraints': {b'usernames': names}}
575 result = callconduit(repo.ui, b'user.search', query)
576 result = callconduit(repo.ui, b'user.search', query)
576 # A username that is not found is not an error of the API, so check
577 # A username that is not found is not an error of the API, so check
577 # whether we missed any names here.
578 # whether we missed any names here.
578 data = result[b'data']
579 data = result[b'data']
579 resolved = set(entry[b'fields'][b'username'].lower() for entry in data)
580 resolved = set(entry[b'fields'][b'username'].lower() for entry in data)
580 unresolved = set(names) - resolved
581 unresolved = set(names) - resolved
581 if unresolved:
582 if unresolved:
582 raise error.Abort(
583 raise error.Abort(
583 _(b'unknown username: %s') % b' '.join(sorted(unresolved))
584 _(b'unknown username: %s') % b' '.join(sorted(unresolved))
584 )
585 )
585 return [entry[b'phid'] for entry in data]
586 return [entry[b'phid'] for entry in data]
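# A usage sketch (usernames and PHIDs are hypothetical):
#   userphids(repo, [b'alice', b'bob'])
#   -> [b'PHID-USER-aaaaaaaaaaaaaaaaaaaa', b'PHID-USER-bbbbbbbbbbbbbbbbbbbb']
# Any name the server does not resolve triggers the "unknown username" abort.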
586
587
587
588
588 @vcrcommand(
589 @vcrcommand(
589 b'phabsend',
590 b'phabsend',
590 [
591 [
591 (b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
592 (b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
592 (b'', b'amend', True, _(b'update commit messages')),
593 (b'', b'amend', True, _(b'update commit messages')),
593 (b'', b'reviewer', [], _(b'specify reviewers')),
594 (b'', b'reviewer', [], _(b'specify reviewers')),
594 (b'', b'blocker', [], _(b'specify blocking reviewers')),
595 (b'', b'blocker', [], _(b'specify blocking reviewers')),
595 (
596 (
596 b'm',
597 b'm',
597 b'comment',
598 b'comment',
598 b'',
599 b'',
599 _(b'add a comment to Revisions with new/updated Diffs'),
600 _(b'add a comment to Revisions with new/updated Diffs'),
600 ),
601 ),
601 (b'', b'confirm', None, _(b'ask for confirmation before sending')),
602 (b'', b'confirm', None, _(b'ask for confirmation before sending')),
602 ],
603 ],
603 _(b'REV [OPTIONS]'),
604 _(b'REV [OPTIONS]'),
604 helpcategory=command.CATEGORY_IMPORT_EXPORT,
605 helpcategory=command.CATEGORY_IMPORT_EXPORT,
605 )
606 )
606 def phabsend(ui, repo, *revs, **opts):
607 def phabsend(ui, repo, *revs, **opts):
607 """upload changesets to Phabricator
608 """upload changesets to Phabricator
608
609
609 If there are multiple revisions specified, they will be sent as a stack
610 If there are multiple revisions specified, they will be sent as a stack
610 with a linear dependency relationship, using the order specified by the
611 with a linear dependency relationship, using the order specified by the
611 revset.
612 revset.
612
613
613 When changesets are uploaded for the first time, local tags will be created
614 When changesets are uploaded for the first time, local tags will be created
614 to maintain the association. After that, phabsend will check the obsstore
615 to maintain the association. After that, phabsend will check the obsstore
615 and tag information so it can figure out whether to update an existing
616 and tag information so it can figure out whether to update an existing
616 Differential Revision or create a new one.
617 Differential Revision or create a new one.
617
618
618 If --amend is set, update commit messages so they contain the
619 If --amend is set, update commit messages so they contain the
619 ``Differential Revision`` URL and remove the related tags. This is similar
620 ``Differential Revision`` URL and remove the related tags. This is similar
620 to what arcanist does, and is preferable in author-push workflows. Otherwise,
621 to what arcanist does, and is preferable in author-push workflows. Otherwise,
621 use local tags to record the ``Differential Revision`` association.
622 use local tags to record the ``Differential Revision`` association.
622
623
623 The --confirm option lets you confirm changesets before sending them. You
624 The --confirm option lets you confirm changesets before sending them. You
624 can also add the following to your configuration file to make it the
625 can also add the following to your configuration file to make it the
625 default behaviour::
626 default behaviour::
626
627
627 [phabsend]
628 [phabsend]
628 confirm = true
629 confirm = true
629
630
630 phabsend will check the obsstore and the above association to decide whether
631 phabsend will check the obsstore and the above association to decide whether
631 update an existing Differential Revision, or create a new one.
632 update an existing Differential Revision, or create a new one.
632 """
633 """
633 opts = pycompat.byteskwargs(opts)
634 opts = pycompat.byteskwargs(opts)
634 revs = list(revs) + opts.get(b'rev', [])
635 revs = list(revs) + opts.get(b'rev', [])
635 revs = scmutil.revrange(repo, revs)
636 revs = scmutil.revrange(repo, revs)
636
637
637 if not revs:
638 if not revs:
638 raise error.Abort(_(b'phabsend requires at least one changeset'))
639 raise error.Abort(_(b'phabsend requires at least one changeset'))
639 if opts.get(b'amend'):
640 if opts.get(b'amend'):
640 cmdutil.checkunfinished(repo)
641 cmdutil.checkunfinished(repo)
641
642
642 # {newnode: (oldnode, olddiff, olddrev)}
643 # {newnode: (oldnode, olddiff, olddrev)}
643 oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])
644 oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])
644
645
645 confirm = ui.configbool(b'phabsend', b'confirm')
646 confirm = ui.configbool(b'phabsend', b'confirm')
646 confirm |= bool(opts.get(b'confirm'))
647 confirm |= bool(opts.get(b'confirm'))
647 if confirm:
648 if confirm:
648 confirmed = _confirmbeforesend(repo, revs, oldmap)
649 confirmed = _confirmbeforesend(repo, revs, oldmap)
649 if not confirmed:
650 if not confirmed:
650 raise error.Abort(_(b'phabsend cancelled'))
651 raise error.Abort(_(b'phabsend cancelled'))
651
652
652 actions = []
653 actions = []
653 reviewers = opts.get(b'reviewer', [])
654 reviewers = opts.get(b'reviewer', [])
654 blockers = opts.get(b'blocker', [])
655 blockers = opts.get(b'blocker', [])
655 phids = []
656 phids = []
656 if reviewers:
657 if reviewers:
657 phids.extend(userphids(repo, reviewers))
658 phids.extend(userphids(repo, reviewers))
658 if blockers:
659 if blockers:
659 phids.extend(
660 phids.extend(
660 map(lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers))
661 map(lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers))
661 )
662 )
662 if phids:
663 if phids:
663 actions.append({b'type': b'reviewers.add', b'value': phids})
664 actions.append({b'type': b'reviewers.add', b'value': phids})
664
665
665 drevids = [] # [int]
666 drevids = [] # [int]
666 diffmap = {} # {newnode: diff}
667 diffmap = {} # {newnode: diff}
667
668
668 # Send patches one by one so we know their Differential Revision PHIDs and
669 # Send patches one by one so we know their Differential Revision PHIDs and
669 # can provide the dependency relationship
670 # can provide the dependency relationship
670 lastrevphid = None
671 lastrevphid = None
671 for rev in revs:
672 for rev in revs:
672 ui.debug(b'sending rev %d\n' % rev)
673 ui.debug(b'sending rev %d\n' % rev)
673 ctx = repo[rev]
674 ctx = repo[rev]
674
675
675 # Get Differential Revision ID
676 # Get Differential Revision ID
676 oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
677 oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
677 if oldnode != ctx.node() or opts.get(b'amend'):
678 if oldnode != ctx.node() or opts.get(b'amend'):
678 # Create or update Differential Revision
679 # Create or update Differential Revision
679 revision, diff = createdifferentialrevision(
680 revision, diff = createdifferentialrevision(
680 ctx,
681 ctx,
681 revid,
682 revid,
682 lastrevphid,
683 lastrevphid,
683 oldnode,
684 oldnode,
684 olddiff,
685 olddiff,
685 actions,
686 actions,
686 opts.get(b'comment'),
687 opts.get(b'comment'),
687 )
688 )
688 diffmap[ctx.node()] = diff
689 diffmap[ctx.node()] = diff
689 newrevid = int(revision[b'object'][b'id'])
690 newrevid = int(revision[b'object'][b'id'])
690 newrevphid = revision[b'object'][b'phid']
691 newrevphid = revision[b'object'][b'phid']
691 if revid:
692 if revid:
692 action = b'updated'
693 action = b'updated'
693 else:
694 else:
694 action = b'created'
695 action = b'created'
695
696
696 # Create a local tag to note the association, if commit message
697 # Create a local tag to note the association, if commit message
697 # does not have it already
698 # does not have it already
698 m = _differentialrevisiondescre.search(ctx.description())
699 m = _differentialrevisiondescre.search(ctx.description())
699 if not m or int(m.group(r'id')) != newrevid:
700 if not m or int(m.group(r'id')) != newrevid:
700 tagname = b'D%d' % newrevid
701 tagname = b'D%d' % newrevid
701 tags.tag(
702 tags.tag(
702 repo,
703 repo,
703 tagname,
704 tagname,
704 ctx.node(),
705 ctx.node(),
705 message=None,
706 message=None,
706 user=None,
707 user=None,
707 date=None,
708 date=None,
708 local=True,
709 local=True,
709 )
710 )
710 else:
711 else:
711 # Nothing changed. But still set "newrevphid" so the next revision
712 # Nothing changed. But still set "newrevphid" so the next revision
712 # can depend on this one, and "newrevid" for the summary line.
713 # can depend on this one, and "newrevid" for the summary line.
713 newrevphid = querydrev(repo, b'%d' % revid)[0][b'phid']
714 newrevphid = querydrev(repo, b'%d' % revid)[0][b'phid']
714 newrevid = revid
715 newrevid = revid
715 action = b'skipped'
716 action = b'skipped'
716
717
717 actiondesc = ui.label(
718 actiondesc = ui.label(
718 {
719 {
719 b'created': _(b'created'),
720 b'created': _(b'created'),
720 b'skipped': _(b'skipped'),
721 b'skipped': _(b'skipped'),
721 b'updated': _(b'updated'),
722 b'updated': _(b'updated'),
722 }[action],
723 }[action],
723 b'phabricator.action.%s' % action,
724 b'phabricator.action.%s' % action,
724 )
725 )
725 drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev')
726 drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev')
726 nodedesc = ui.label(bytes(ctx), b'phabricator.node')
727 nodedesc = ui.label(bytes(ctx), b'phabricator.node')
727 desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
728 desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
728 ui.write(
729 ui.write(
729 _(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, desc)
730 _(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, desc)
730 )
731 )
731 drevids.append(newrevid)
732 drevids.append(newrevid)
732 lastrevphid = newrevphid
733 lastrevphid = newrevphid
733
734
734 # Update commit messages and remove tags
735 # Update commit messages and remove tags
735 if opts.get(b'amend'):
736 if opts.get(b'amend'):
736 unfi = repo.unfiltered()
737 unfi = repo.unfiltered()
737 drevs = callconduit(ui, b'differential.query', {b'ids': drevids})
738 drevs = callconduit(ui, b'differential.query', {b'ids': drevids})
738 with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
739 with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
739 wnode = unfi[b'.'].node()
740 wnode = unfi[b'.'].node()
740 mapping = {} # {oldnode: [newnode]}
741 mapping = {} # {oldnode: [newnode]}
741 for i, rev in enumerate(revs):
742 for i, rev in enumerate(revs):
742 old = unfi[rev]
743 old = unfi[rev]
743 drevid = drevids[i]
744 drevid = drevids[i]
744 drev = [d for d in drevs if int(d[b'id']) == drevid][0]
745 drev = [d for d in drevs if int(d[b'id']) == drevid][0]
745 newdesc = getdescfromdrev(drev)
746 newdesc = getdescfromdrev(drev)
746 # Make sure the commit message contains "Differential Revision"
747 # Make sure the commit message contains "Differential Revision"
747 if old.description() != newdesc:
748 if old.description() != newdesc:
748 if old.phase() == phases.public:
749 if old.phase() == phases.public:
749 ui.warn(
750 ui.warn(
750 _(b"warning: not updating public commit %s\n")
751 _(b"warning: not updating public commit %s\n")
751 % scmutil.formatchangeid(old)
752 % scmutil.formatchangeid(old)
752 )
753 )
753 continue
754 continue
754 parents = [
755 parents = [
755 mapping.get(old.p1().node(), (old.p1(),))[0],
756 mapping.get(old.p1().node(), (old.p1(),))[0],
756 mapping.get(old.p2().node(), (old.p2(),))[0],
757 mapping.get(old.p2().node(), (old.p2(),))[0],
757 ]
758 ]
758 new = context.metadataonlyctx(
759 new = context.metadataonlyctx(
759 repo,
760 repo,
760 old,
761 old,
761 parents=parents,
762 parents=parents,
762 text=newdesc,
763 text=newdesc,
763 user=old.user(),
764 user=old.user(),
764 date=old.date(),
765 date=old.date(),
765 extra=old.extra(),
766 extra=old.extra(),
766 )
767 )
767
768
768 newnode = new.commit()
769 newnode = new.commit()
769
770
770 mapping[old.node()] = [newnode]
771 mapping[old.node()] = [newnode]
771 # Update diff property
772 # Update diff property
772 # If it fails, just warn and keep going; otherwise the DREV
773 # If it fails, just warn and keep going; otherwise the DREV
773 # associations will be lost
774 # associations will be lost
774 try:
775 try:
775 writediffproperties(unfi[newnode], diffmap[old.node()])
776 writediffproperties(unfi[newnode], diffmap[old.node()])
776 except util.urlerr.urlerror:
777 except util.urlerr.urlerror:
777 ui.warnnoi18n(b'Failed to update metadata for D%s\n' % drevid)
778 ui.warnnoi18n(b'Failed to update metadata for D%s\n' % drevid)
778 # Remove the local tag since it is no longer necessary
779 # Remove the local tag since it is no longer necessary
779 tagname = b'D%d' % drevid
780 tagname = b'D%d' % drevid
780 if tagname in repo.tags():
781 if tagname in repo.tags():
781 tags.tag(
782 tags.tag(
782 repo,
783 repo,
783 tagname,
784 tagname,
784 nullid,
785 nullid,
785 message=None,
786 message=None,
786 user=None,
787 user=None,
787 date=None,
788 date=None,
788 local=True,
789 local=True,
789 )
790 )
790 scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
791 scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
791 if wnode in mapping:
792 if wnode in mapping:
792 unfi.setparents(mapping[wnode][0])
793 unfi.setparents(mapping[wnode][0])
793
794
794
795
795 # Map from "hg:meta" keys to header understood by "hg import". The order is
796 # Map from "hg:meta" keys to header understood by "hg import". The order is
796 # consistent with "hg export" output.
797 # consistent with "hg export" output.
797 _metanamemap = util.sortdict(
798 _metanamemap = util.sortdict(
798 [
799 [
799 (b'user', b'User'),
800 (b'user', b'User'),
800 (b'date', b'Date'),
801 (b'date', b'Date'),
801 (b'branch', b'Branch'),
802 (b'branch', b'Branch'),
802 (b'node', b'Node ID'),
803 (b'node', b'Node ID'),
803 (b'parent', b'Parent '),
804 (b'parent', b'Parent '),
804 ]
805 ]
805 )
806 )
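# readpatch() below renders these as "hg import" header lines, e.g. (values
# borrowed from the hg:meta example in the getdiffmeta docstring):
#   # User Foo Bar <foo@example.com>
#   # Date 1499571514 25200
#   # Node ID 98c08acae292b2faf60a279b4189beb6cff1414d
#   # Parent  6d0abad76b30e4724a37ab8721d630394070fe16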
806
807
807
808
808 def _confirmbeforesend(repo, revs, oldmap):
809 def _confirmbeforesend(repo, revs, oldmap):
809 url, token = readurltoken(repo.ui)
810 url, token = readurltoken(repo.ui)
810 ui = repo.ui
811 ui = repo.ui
811 for rev in revs:
812 for rev in revs:
812 ctx = repo[rev]
813 ctx = repo[rev]
813 desc = ctx.description().splitlines()[0]
814 desc = ctx.description().splitlines()[0]
814 oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
815 oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
815 if drevid:
816 if drevid:
816 drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev')
817 drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev')
817 else:
818 else:
818 drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
819 drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
819
820
820 ui.write(
821 ui.write(
821 _(b'%s - %s: %s\n')
822 _(b'%s - %s: %s\n')
822 % (
823 % (
823 drevdesc,
824 drevdesc,
824 ui.label(bytes(ctx), b'phabricator.node'),
825 ui.label(bytes(ctx), b'phabricator.node'),
825 ui.label(desc, b'phabricator.desc'),
826 ui.label(desc, b'phabricator.desc'),
826 )
827 )
827 )
828 )
828
829
829 if ui.promptchoice(
830 if ui.promptchoice(
830 _(b'Send the above changes to %s (yn)?' b'$$ &Yes $$ &No') % url
831 _(b'Send the above changes to %s (yn)?' b'$$ &Yes $$ &No') % url
831 ):
832 ):
832 return False
833 return False
833
834
834 return True
835 return True
835
836
836
837
837 _knownstatusnames = {
838 _knownstatusnames = {
838 b'accepted',
839 b'accepted',
839 b'needsreview',
840 b'needsreview',
840 b'needsrevision',
841 b'needsrevision',
841 b'closed',
842 b'closed',
842 b'abandoned',
843 b'abandoned',
843 }
844 }
844
845
845
846
846 def _getstatusname(drev):
847 def _getstatusname(drev):
847 """get normalized status name from a Differential Revision"""
848 """get normalized status name from a Differential Revision"""
848 return drev[b'statusName'].replace(b' ', b'').lower()
849 return drev[b'statusName'].replace(b' ', b'').lower()
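# For example, a drev whose statusName is "Needs Review" normalizes to
# b'needsreview', one of _knownstatusnames above.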
849
850
850
851
851 # Small language to specify Differential Revisions. Supported symbols: (),
852 # Small language to specify Differential Revisions. Supported symbols: (),
852 # :X, +, -, and &.
853 # :X, +, -, and &.
853
854
854 _elements = {
855 _elements = {
855 # token-type: binding-strength, primary, prefix, infix, suffix
856 # token-type: binding-strength, primary, prefix, infix, suffix
856 b'(': (12, None, (b'group', 1, b')'), None, None),
857 b'(': (12, None, (b'group', 1, b')'), None, None),
857 b':': (8, None, (b'ancestors', 8), None, None),
858 b':': (8, None, (b'ancestors', 8), None, None),
858 b'&': (5, None, None, (b'and_', 5), None),
859 b'&': (5, None, None, (b'and_', 5), None),
859 b'+': (4, None, None, (b'add', 4), None),
860 b'+': (4, None, None, (b'add', 4), None),
860 b'-': (4, None, None, (b'sub', 4), None),
861 b'-': (4, None, None, (b'sub', 4), None),
861 b')': (0, None, None, None, None),
862 b')': (0, None, None, None, None),
862 b'symbol': (0, b'symbol', None, None, None),
863 b'symbol': (0, b'symbol', None, None, None),
863 b'end': (0, None, None, None, None),
864 b'end': (0, None, None, None, None),
864 }
865 }
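# As a sketch of the grammar above, the spec b':D6+8' parses to a tree like
#   ('add', ('ancestors', ('symbol', 'D6')), ('symbol', '8'))
# i.e. "the stack up to D6, plus D8" (see the phabread docstring for the
# spec language).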
865
866
866
867
867 def _tokenize(text):
868 def _tokenize(text):
868 view = memoryview(text) # zero-copy slice
869 view = memoryview(text) # zero-copy slice
869 special = b'():+-& '
870 special = b'():+-& '
870 pos = 0
871 pos = 0
871 length = len(text)
872 length = len(text)
872 while pos < length:
873 while pos < length:
873 symbol = b''.join(
874 symbol = b''.join(
874 itertools.takewhile(
875 itertools.takewhile(
875 lambda ch: ch not in special, pycompat.iterbytestr(view[pos:])
876 lambda ch: ch not in special, pycompat.iterbytestr(view[pos:])
876 )
877 )
877 )
878 )
878 if symbol:
879 if symbol:
879 yield (b'symbol', symbol, pos)
880 yield (b'symbol', symbol, pos)
880 pos += len(symbol)
881 pos += len(symbol)
881 else: # special char, ignore space
882 else: # special char, ignore space
882 if text[pos] != b' ':
883 if text[pos] != b' ':
883 yield (text[pos], None, pos)
884 yield (text[pos], None, pos)
884 pos += 1
885 pos += 1
885 yield (b'end', None, pos)
886 yield (b'end', None, pos)
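# For example, _tokenize(b':D6+8') yields roughly:
#   (b':', None, 0), (b'symbol', b'D6', 1), (b'+', None, 3),
#   (b'symbol', b'8', 4), (b'end', None, 5)
# with spaces skipped and every other special character passed through.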
886
887
887
888
888 def _parse(text):
889 def _parse(text):
889 tree, pos = parser.parser(_elements).parse(_tokenize(text))
890 tree, pos = parser.parser(_elements).parse(_tokenize(text))
890 if pos != len(text):
891 if pos != len(text):
891 raise error.ParseError(b'invalid token', pos)
892 raise error.ParseError(b'invalid token', pos)
892 return tree
893 return tree
893
894
894
895
895 def _parsedrev(symbol):
896 def _parsedrev(symbol):
896 """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
897 """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
897 if symbol.startswith(b'D') and symbol[1:].isdigit():
898 if symbol.startswith(b'D') and symbol[1:].isdigit():
898 return int(symbol[1:])
899 return int(symbol[1:])
899 if symbol.isdigit():
900 if symbol.isdigit():
900 return int(symbol)
901 return int(symbol)
901
902
902
903
903 def _prefetchdrevs(tree):
904 def _prefetchdrevs(tree):
904 """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
905 """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
905 drevs = set()
906 drevs = set()
906 ancestordrevs = set()
907 ancestordrevs = set()
907 op = tree[0]
908 op = tree[0]
908 if op == b'symbol':
909 if op == b'symbol':
909 r = _parsedrev(tree[1])
910 r = _parsedrev(tree[1])
910 if r:
911 if r:
911 drevs.add(r)
912 drevs.add(r)
912 elif op == b'ancestors':
913 elif op == b'ancestors':
913 r, a = _prefetchdrevs(tree[1])
914 r, a = _prefetchdrevs(tree[1])
914 drevs.update(r)
915 drevs.update(r)
915 ancestordrevs.update(r)
916 ancestordrevs.update(r)
916 ancestordrevs.update(a)
917 ancestordrevs.update(a)
917 else:
918 else:
918 for t in tree[1:]:
919 for t in tree[1:]:
919 r, a = _prefetchdrevs(t)
920 r, a = _prefetchdrevs(t)
920 drevs.update(r)
921 drevs.update(r)
921 ancestordrevs.update(a)
922 ancestordrevs.update(a)
922 return drevs, ancestordrevs
923 return drevs, ancestordrevs
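# For the tree sketched above (spec b':D6+8') this returns ({6, 8}, {6}):
# D6 and D8 are prefetched individually, and D6's ancestors are prefetched
# as well.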
923
924
924
925
925 def querydrev(repo, spec):
926 def querydrev(repo, spec):
926 """return a list of "Differential Revision" dicts
927 """return a list of "Differential Revision" dicts
927
928
928 spec is a string using a simple query language, see docstring in phabread
929 spec is a string using a simple query language, see docstring in phabread
929 for details.
930 for details.
930
931
931 A "Differential Revision dict" looks like:
932 A "Differential Revision dict" looks like:
932
933
933 {
934 {
934 "id": "2",
935 "id": "2",
935 "phid": "PHID-DREV-672qvysjcczopag46qty",
936 "phid": "PHID-DREV-672qvysjcczopag46qty",
936 "title": "example",
937 "title": "example",
937 "uri": "https://phab.example.com/D2",
938 "uri": "https://phab.example.com/D2",
938 "dateCreated": "1499181406",
939 "dateCreated": "1499181406",
939 "dateModified": "1499182103",
940 "dateModified": "1499182103",
940 "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
941 "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
941 "status": "0",
942 "status": "0",
942 "statusName": "Needs Review",
943 "statusName": "Needs Review",
943 "properties": [],
944 "properties": [],
944 "branch": null,
945 "branch": null,
945 "summary": "",
946 "summary": "",
946 "testPlan": "",
947 "testPlan": "",
947 "lineCount": "2",
948 "lineCount": "2",
948 "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
949 "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
949 "diffs": [
950 "diffs": [
950 "3",
951 "3",
951 "4",
952 "4",
952 ],
953 ],
953 "commits": [],
954 "commits": [],
954 "reviewers": [],
955 "reviewers": [],
955 "ccs": [],
956 "ccs": [],
956 "hashes": [],
957 "hashes": [],
957 "auxiliary": {
958 "auxiliary": {
958 "phabricator:projects": [],
959 "phabricator:projects": [],
959 "phabricator:depends-on": [
960 "phabricator:depends-on": [
960 "PHID-DREV-gbapp366kutjebt7agcd"
961 "PHID-DREV-gbapp366kutjebt7agcd"
961 ]
962 ]
962 },
963 },
963 "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
964 "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
964 "sourcePath": null
965 "sourcePath": null
965 }
966 }
966 """
967 """
967
968
968 def fetch(params):
969 def fetch(params):
969 """params -> single drev or None"""
970 """params -> single drev or None"""
970 key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
971 key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
971 if key in prefetched:
972 if key in prefetched:
972 return prefetched[key]
973 return prefetched[key]
973 drevs = callconduit(repo.ui, b'differential.query', params)
974 drevs = callconduit(repo.ui, b'differential.query', params)
974 # Fill prefetched with the result
975 # Fill prefetched with the result
975 for drev in drevs:
976 for drev in drevs:
976 prefetched[drev[b'phid']] = drev
977 prefetched[drev[b'phid']] = drev
977 prefetched[int(drev[b'id'])] = drev
978 prefetched[int(drev[b'id'])] = drev
978 if key not in prefetched:
979 if key not in prefetched:
979 raise error.Abort(
980 raise error.Abort(
980 _(b'cannot get Differential Revision %r') % params
981 _(b'cannot get Differential Revision %r') % params
981 )
982 )
982 return prefetched[key]
983 return prefetched[key]
983
984
984 def getstack(topdrevids):
985 def getstack(topdrevids):
985 """given a top, get a stack from the bottom, [id] -> [id]"""
986 """given a top, get a stack from the bottom, [id] -> [id]"""
986 visited = set()
987 visited = set()
987 result = []
988 result = []
988 queue = [{b'ids': [i]} for i in topdrevids]
989 queue = [{b'ids': [i]} for i in topdrevids]
989 while queue:
990 while queue:
990 params = queue.pop()
991 params = queue.pop()
991 drev = fetch(params)
992 drev = fetch(params)
992 if drev[b'id'] in visited:
993 if drev[b'id'] in visited:
993 continue
994 continue
994 visited.add(drev[b'id'])
995 visited.add(drev[b'id'])
995 result.append(int(drev[b'id']))
996 result.append(int(drev[b'id']))
996 auxiliary = drev.get(b'auxiliary', {})
997 auxiliary = drev.get(b'auxiliary', {})
997 depends = auxiliary.get(b'phabricator:depends-on', [])
998 depends = auxiliary.get(b'phabricator:depends-on', [])
998 for phid in depends:
999 for phid in depends:
999 queue.append({b'phids': [phid]})
1000 queue.append({b'phids': [phid]})
1000 result.reverse()
1001 result.reverse()
1001 return smartset.baseset(result)
1002 return smartset.baseset(result)
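# Sketch: if D2 depends on D1 (via "phabricator:depends-on"), then
# getstack([2]) returns [1, 2], bottom of the stack first.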
1002
1003
1003 # Initialize prefetch cache
1004 # Initialize prefetch cache
1004 prefetched = {} # {id or phid: drev}
1005 prefetched = {} # {id or phid: drev}
1005
1006
1006 tree = _parse(spec)
1007 tree = _parse(spec)
1007 drevs, ancestordrevs = _prefetchdrevs(tree)
1008 drevs, ancestordrevs = _prefetchdrevs(tree)
1008
1009
1009 # developer config: phabricator.batchsize
1010 # developer config: phabricator.batchsize
1010 batchsize = repo.ui.configint(b'phabricator', b'batchsize')
1011 batchsize = repo.ui.configint(b'phabricator', b'batchsize')
1011
1012
1012 # Prefetch Differential Revisions in batch
1013 # Prefetch Differential Revisions in batch
1013 tofetch = set(drevs)
1014 tofetch = set(drevs)
1014 for r in ancestordrevs:
1015 for r in ancestordrevs:
1015 tofetch.update(range(max(1, r - batchsize), r + 1))
1016 tofetch.update(range(max(1, r - batchsize), r + 1))
1016 if drevs:
1017 if drevs:
1017 fetch({b'ids': list(tofetch)})
1018 fetch({b'ids': list(tofetch)})
1018 validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))
1019 validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))
1019
1020
1020 # Walk through the tree, return smartsets
1021 # Walk through the tree, return smartsets
1021 def walk(tree):
1022 def walk(tree):
1022 op = tree[0]
1023 op = tree[0]
1023 if op == b'symbol':
1024 if op == b'symbol':
1024 drev = _parsedrev(tree[1])
1025 drev = _parsedrev(tree[1])
1025 if drev:
1026 if drev:
1026 return smartset.baseset([drev])
1027 return smartset.baseset([drev])
1027 elif tree[1] in _knownstatusnames:
1028 elif tree[1] in _knownstatusnames:
1028 drevs = [
1029 drevs = [
1029 r
1030 r
1030 for r in validids
1031 for r in validids
1031 if _getstatusname(prefetched[r]) == tree[1]
1032 if _getstatusname(prefetched[r]) == tree[1]
1032 ]
1033 ]
1033 return smartset.baseset(drevs)
1034 return smartset.baseset(drevs)
1034 else:
1035 else:
1035 raise error.Abort(_(b'unknown symbol: %s') % tree[1])
1036 raise error.Abort(_(b'unknown symbol: %s') % tree[1])
1036 elif op in {b'and_', b'add', b'sub'}:
1037 elif op in {b'and_', b'add', b'sub'}:
1037 assert len(tree) == 3
1038 assert len(tree) == 3
1038 return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
1039 return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
1039 elif op == b'group':
1040 elif op == b'group':
1040 return walk(tree[1])
1041 return walk(tree[1])
1041 elif op == b'ancestors':
1042 elif op == b'ancestors':
1042 return getstack(walk(tree[1]))
1043 return getstack(walk(tree[1]))
1043 else:
1044 else:
1044 raise error.ProgrammingError(b'illegal tree: %r' % tree)
1045 raise error.ProgrammingError(b'illegal tree: %r' % tree)
1045
1046
1046 return [prefetched[r] for r in walk(tree)]
1047 return [prefetched[r] for r in walk(tree)]
1047
1048
1048
1049
1049 def getdescfromdrev(drev):
1050 def getdescfromdrev(drev):
1050 """get description (commit message) from "Differential Revision"
1051 """get description (commit message) from "Differential Revision"
1051
1052
1052 This is similar to the differential.getcommitmessage API, but we only care
1053 This is similar to the differential.getcommitmessage API, but we only care
1053 about a limited set of fields: title, summary, test plan, and URL.
1054 about a limited set of fields: title, summary, test plan, and URL.
1054 """
1055 """
1055 title = drev[b'title']
1056 title = drev[b'title']
1056 summary = drev[b'summary'].rstrip()
1057 summary = drev[b'summary'].rstrip()
1057 testplan = drev[b'testPlan'].rstrip()
1058 testplan = drev[b'testPlan'].rstrip()
1058 if testplan:
1059 if testplan:
1059 testplan = b'Test Plan:\n%s' % testplan
1060 testplan = b'Test Plan:\n%s' % testplan
1060 uri = b'Differential Revision: %s' % drev[b'uri']
1061 uri = b'Differential Revision: %s' % drev[b'uri']
1061 return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
1062 return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
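# For example, a drev with title "example", an empty summary and test plan,
# and uri "https://phab.example.com/D2" (as in the querydrev sample above)
# yields:
#   example
#
#   Differential Revision: https://phab.example.com/D2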
1062
1063
1063
1064
1064 def getdiffmeta(diff):
1065 def getdiffmeta(diff):
1065 """get commit metadata (date, node, user, p1) from a diff object
1066 """get commit metadata (date, node, user, p1) from a diff object
1066
1067
1067 The metadata could be "hg:meta", sent by phabsend, like:
1068 The metadata could be "hg:meta", sent by phabsend, like:
1068
1069
1069 "properties": {
1070 "properties": {
1070 "hg:meta": {
1071 "hg:meta": {
1071 "date": "1499571514 25200",
1072 "date": "1499571514 25200",
1072 "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
1073 "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
1073 "user": "Foo Bar <foo@example.com>",
1074 "user": "Foo Bar <foo@example.com>",
1074 "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
1075 "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
1075 }
1076 }
1076 }
1077 }
1077
1078
1078 Or converted from "local:commits", sent by "arc", like:
1079 Or converted from "local:commits", sent by "arc", like:
1079
1080
1080 "properties": {
1081 "properties": {
1081 "local:commits": {
1082 "local:commits": {
1082 "98c08acae292b2faf60a279b4189beb6cff1414d": {
1083 "98c08acae292b2faf60a279b4189beb6cff1414d": {
1083 "author": "Foo Bar",
1084 "author": "Foo Bar",
1084 "time": 1499546314,
1085 "time": 1499546314,
1085 "branch": "default",
1086 "branch": "default",
1086 "tag": "",
1087 "tag": "",
1087 "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
1088 "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
1088 "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
1089 "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
1089 "local": "1000",
1090 "local": "1000",
1090 "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
1091 "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
1091 "summary": "...",
1092 "summary": "...",
1092 "message": "...",
1093 "message": "...",
1093 "authorEmail": "foo@example.com"
1094 "authorEmail": "foo@example.com"
1094 }
1095 }
1095 }
1096 }
1096 }
1097 }
1097
1098
1098 Note: metadata extracted from "local:commits" will lose time zone
1099 Note: metadata extracted from "local:commits" will lose time zone
1099 information.
1100 information.
1100 """
1101 """
1101 props = diff.get(b'properties') or {}
1102 props = diff.get(b'properties') or {}
1102 meta = props.get(b'hg:meta')
1103 meta = props.get(b'hg:meta')
1103 if not meta:
1104 if not meta:
1104 if props.get(b'local:commits'):
1105 if props.get(b'local:commits'):
1105 commit = sorted(props[b'local:commits'].values())[0]
1106 commit = sorted(props[b'local:commits'].values())[0]
1106 meta = {}
1107 meta = {}
1107 if b'author' in commit and b'authorEmail' in commit:
1108 if b'author' in commit and b'authorEmail' in commit:
1108 meta[b'user'] = b'%s <%s>' % (
1109 meta[b'user'] = b'%s <%s>' % (
1109 commit[b'author'],
1110 commit[b'author'],
1110 commit[b'authorEmail'],
1111 commit[b'authorEmail'],
1111 )
1112 )
1112 if b'time' in commit:
1113 if b'time' in commit:
1113 meta[b'date'] = b'%d 0' % int(commit[b'time'])
1114 meta[b'date'] = b'%d 0' % int(commit[b'time'])
1114 if b'branch' in commit:
1115 if b'branch' in commit:
1115 meta[b'branch'] = commit[b'branch']
1116 meta[b'branch'] = commit[b'branch']
1116 node = commit.get(b'commit', commit.get(b'rev'))
1117 node = commit.get(b'commit', commit.get(b'rev'))
1117 if node:
1118 if node:
1118 meta[b'node'] = node
1119 meta[b'node'] = node
1119 if len(commit.get(b'parents', ())) >= 1:
1120 if len(commit.get(b'parents', ())) >= 1:
1120 meta[b'parent'] = commit[b'parents'][0]
1121 meta[b'parent'] = commit[b'parents'][0]
1121 else:
1122 else:
1122 meta = {}
1123 meta = {}
1123 if b'date' not in meta and b'dateCreated' in diff:
1124 if b'date' not in meta and b'dateCreated' in diff:
1124 meta[b'date'] = b'%s 0' % diff[b'dateCreated']
1125 meta[b'date'] = b'%s 0' % diff[b'dateCreated']
1125 if b'branch' not in meta and diff.get(b'branch'):
1126 if b'branch' not in meta and diff.get(b'branch'):
1126 meta[b'branch'] = diff[b'branch']
1127 meta[b'branch'] = diff[b'branch']
1127 if b'parent' not in meta and diff.get(b'sourceControlBaseRevision'):
1128 if b'parent' not in meta and diff.get(b'sourceControlBaseRevision'):
1128 meta[b'parent'] = diff[b'sourceControlBaseRevision']
1129 meta[b'parent'] = diff[b'sourceControlBaseRevision']
1129 return meta
1130 return meta
1130
1131
1131
1132
1132 def readpatch(repo, drevs, write):
1133 def readpatch(repo, drevs, write):
1133 """generate plain-text patch readable by 'hg import'
1134 """generate plain-text patch readable by 'hg import'
1134
1135
1135 write is usually ui.write. drevs is what "querydrev" returns, results of
1136 write is usually ui.write. drevs is what "querydrev" returns, results of
1136 "differential.query".
1137 "differential.query".
1137 """
1138 """
1138 # Prefetch hg:meta property for all diffs
1139 # Prefetch hg:meta property for all diffs
1139 diffids = sorted(set(max(int(v) for v in drev[b'diffs']) for drev in drevs))
1140 diffids = sorted(set(max(int(v) for v in drev[b'diffs']) for drev in drevs))
1140 diffs = callconduit(repo.ui, b'differential.querydiffs', {b'ids': diffids})
1141 diffs = callconduit(repo.ui, b'differential.querydiffs', {b'ids': diffids})
1141
1142
1142 # Generate patch for each drev
1143 # Generate patch for each drev
1143 for drev in drevs:
1144 for drev in drevs:
1144 repo.ui.note(_(b'reading D%s\n') % drev[b'id'])
1145 repo.ui.note(_(b'reading D%s\n') % drev[b'id'])
1145
1146
1146 diffid = max(int(v) for v in drev[b'diffs'])
1147 diffid = max(int(v) for v in drev[b'diffs'])
1147 body = callconduit(
1148 body = callconduit(
1148 repo.ui, b'differential.getrawdiff', {b'diffID': diffid}
1149 repo.ui, b'differential.getrawdiff', {b'diffID': diffid}
1149 )
1150 )
1150 desc = getdescfromdrev(drev)
1151 desc = getdescfromdrev(drev)
1151 header = b'# HG changeset patch\n'
1152 header = b'# HG changeset patch\n'
1152
1153
1153 # Try to preserve metadata from hg:meta property. Write hg patch
1154 # Try to preserve metadata from hg:meta property. Write hg patch
1154 # headers that can be read by the "import" command. See patchheadermap
1155 # headers that can be read by the "import" command. See patchheadermap
1155 # and extract in mercurial/patch.py for supported headers.
1156 # and extract in mercurial/patch.py for supported headers.
1156 meta = getdiffmeta(diffs[b'%d' % diffid])
1157 meta = getdiffmeta(diffs[b'%d' % diffid])
1157 for k in _metanamemap.keys():
1158 for k in _metanamemap.keys():
1158 if k in meta:
1159 if k in meta:
1159 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
1160 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
1160
1161
1161 content = b'%s%s\n%s' % (header, desc, body)
1162 content = b'%s%s\n%s' % (header, desc, body)
1162 write(content)
1163 write(content)
1163
1164
1164
1165
1165 @vcrcommand(
1166 @vcrcommand(
1166 b'phabread',
1167 b'phabread',
1167 [(b'', b'stack', False, _(b'read dependencies'))],
1168 [(b'', b'stack', False, _(b'read dependencies'))],
1168 _(b'DREVSPEC [OPTIONS]'),
1169 _(b'DREVSPEC [OPTIONS]'),
1169 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1170 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1170 )
1171 )
1171 def phabread(ui, repo, spec, **opts):
1172 def phabread(ui, repo, spec, **opts):
1172 """print patches from Phabricator suitable for importing
1173 """print patches from Phabricator suitable for importing
1173
1174
1174 DREVSPEC could be a Differential Revision identifier, like ``D123``, or just
1175 DREVSPEC could be a Differential Revision identifier, like ``D123``, or just
1175 the number ``123``. It could also have common operators like ``+``, ``-``,
1176 the number ``123``. It could also have common operators like ``+``, ``-``,
1176 ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
1177 ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
1177 select a stack.
1178 select a stack.
1178
1179
1179 ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
1180 ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
1180 could be used to filter patches by status. For performance reasons, they
1181 could be used to filter patches by status. For performance reasons, they
1181 only represent a subset of non-status selections and cannot be used alone.
1182 only represent a subset of non-status selections and cannot be used alone.
1182
1183
1183 For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8, excluding
1184 For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8, excluding
1184 D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
1185 D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
1185 stack up to D9.
1186 stack up to D9.
1186
1187
1187 If --stack is given, follow dependency information and read all patches.
1188 If --stack is given, follow dependency information and read all patches.
1188 It is equivalent to the ``:`` operator.
1189 It is equivalent to the ``:`` operator.
1189 """
1190 """
1190 opts = pycompat.byteskwargs(opts)
1191 opts = pycompat.byteskwargs(opts)
1191 if opts.get(b'stack'):
1192 if opts.get(b'stack'):
1192 spec = b':(%s)' % spec
1193 spec = b':(%s)' % spec
1193 drevs = querydrev(repo, spec)
1194 drevs = querydrev(repo, spec)
1194 readpatch(repo, drevs, ui.write)
1195 readpatch(repo, drevs, ui.write)
1195
1196
1196
1197
1197 @vcrcommand(
1198 @vcrcommand(
1198 b'phabupdate',
1199 b'phabupdate',
1199 [
1200 [
1200 (b'', b'accept', False, _(b'accept revisions')),
1201 (b'', b'accept', False, _(b'accept revisions')),
1201 (b'', b'reject', False, _(b'reject revisions')),
1202 (b'', b'reject', False, _(b'reject revisions')),
1202 (b'', b'abandon', False, _(b'abandon revisions')),
1203 (b'', b'abandon', False, _(b'abandon revisions')),
1203 (b'', b'reclaim', False, _(b'reclaim revisions')),
1204 (b'', b'reclaim', False, _(b'reclaim revisions')),
1204 (b'm', b'comment', b'', _(b'comment on the last revision')),
1205 (b'm', b'comment', b'', _(b'comment on the last revision')),
1205 ],
1206 ],
1206 _(b'DREVSPEC [OPTIONS]'),
1207 _(b'DREVSPEC [OPTIONS]'),
1207 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1208 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1208 )
1209 )
1209 def phabupdate(ui, repo, spec, **opts):
1210 def phabupdate(ui, repo, spec, **opts):
1210 """update Differential Revision in batch
1211 """update Differential Revision in batch
1211
1212
1212 DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
1213 DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
1213 """
1214 """
1214 opts = pycompat.byteskwargs(opts)
1215 opts = pycompat.byteskwargs(opts)
1215 flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)]
1216 flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)]
1216 if len(flags) > 1:
1217 if len(flags) > 1:
1217 raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
1218 raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
1218
1219
1219 actions = []
1220 actions = []
1220 for f in flags:
1221 for f in flags:
1221 actions.append({b'type': f, b'value': b'true'})
1222 actions.append({b'type': f, b'value': b'true'})
1222
1223
1223 drevs = querydrev(repo, spec)
1224 drevs = querydrev(repo, spec)
1224 for i, drev in enumerate(drevs):
1225 for i, drev in enumerate(drevs):
1225 if i + 1 == len(drevs) and opts.get(b'comment'):
1226 if i + 1 == len(drevs) and opts.get(b'comment'):
1226 actions.append({b'type': b'comment', b'value': opts[b'comment']})
1227 actions.append({b'type': b'comment', b'value': opts[b'comment']})
1227 if actions:
1228 if actions:
1228 params = {
1229 params = {
1229 b'objectIdentifier': drev[b'phid'],
1230 b'objectIdentifier': drev[b'phid'],
1230 b'transactions': actions,
1231 b'transactions': actions,
1231 }
1232 }
1232 callconduit(ui, b'differential.revision.edit', params)
1233 callconduit(ui, b'differential.revision.edit', params)
1233
1234
1234
1235
1235 @eh.templatekeyword(b'phabreview', requires={b'ctx'})
1236 @eh.templatekeyword(b'phabreview', requires={b'ctx'})
1236 def template_review(context, mapping):
1237 def template_review(context, mapping):
1237 """:phabreview: Object describing the review for this changeset.
1238 """:phabreview: Object describing the review for this changeset.
1238 Has attributes `url` and `id`.
1239 Has attributes `url` and `id`.
1239 """
1240 """
1240 ctx = context.resource(mapping, b'ctx')
1241 ctx = context.resource(mapping, b'ctx')
1241 m = _differentialrevisiondescre.search(ctx.description())
1242 m = _differentialrevisiondescre.search(ctx.description())
1242 if m:
1243 if m:
1243 return templateutil.hybriddict(
1244 return templateutil.hybriddict(
1244 {b'url': m.group(r'url'), b'id': b"D%s" % m.group(r'id'),}
1245 {b'url': m.group(r'url'), b'id': b"D%s" % m.group(r'id'),}
1245 )
1246 )
1246 else:
1247 else:
1247 tags = ctx.repo().nodetags(ctx.node())
1248 tags = ctx.repo().nodetags(ctx.node())
1248 for t in tags:
1249 for t in tags:
1249 if _differentialrevisiontagre.match(t):
1250 if _differentialrevisiontagre.match(t):
1250 url = ctx.repo().ui.config(b'phabricator', b'url')
1251 url = ctx.repo().ui.config(b'phabricator', b'url')
1251 if not url.endswith(b'/'):
1252 if not url.endswith(b'/'):
1252 url += b'/'
1253 url += b'/'
1253 url += t
1254 url += t
1254
1255
1255 return templateutil.hybriddict({b'url': url, b'id': t,})
1256 return templateutil.hybriddict({b'url': url, b'id': t,})
1256 return None
1257 return None
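For intuition, a simplified stand-in for the description regex used above
(the real _differentialrevisiondescre is defined earlier in this file and
may differ in detail) behaves like:

    import re

    descre = re.compile(br'Differential Revision:\s*(?P<url>.*D(?P<id>\d+))')
    desc = b'fix parser\n\nDifferential Revision: https://phab.example.com/D1234'
    m = descre.search(desc)
    if m:
        review = {b'url': m.group('url'), b'id': b'D%s' % m.group('id')}
        # -> {b'url': b'https://phab.example.com/D1234', b'id': b'D1234'}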
@@ -1,558 +1,561
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import collections
3 import collections
4 import errno
4 import errno
5 import hashlib
5 import hashlib
6 import mmap
6 import mmap
7 import os
7 import os
8 import struct
8 import struct
9 import time
9 import time
10
10
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 from mercurial.pycompat import open
12 from mercurial.pycompat import (
13 getattr,
14 open,
15 )
13 from mercurial import (
16 from mercurial import (
14 node as nodemod,
17 node as nodemod,
15 policy,
18 policy,
16 pycompat,
19 pycompat,
17 util,
20 util,
18 vfs as vfsmod,
21 vfs as vfsmod,
19 )
22 )
20 from . import shallowutil
23 from . import shallowutil
21
24
22 osutil = policy.importmod(r'osutil')
25 osutil = policy.importmod(r'osutil')
23
26
24 # The pack version supported by this implementation. This will need to be
27 # The pack version supported by this implementation. This will need to be
25 # rev'd whenever the byte format changes. Ex: changing the fanout prefix,
28 # rev'd whenever the byte format changes. Ex: changing the fanout prefix,
26 # changing any of the int sizes, changing the delta algorithm, etc.
29 # changing any of the int sizes, changing the delta algorithm, etc.
27 PACKVERSIONSIZE = 1
30 PACKVERSIONSIZE = 1
28 INDEXVERSIONSIZE = 2
31 INDEXVERSIONSIZE = 2
29
32
30 FANOUTSTART = INDEXVERSIONSIZE
33 FANOUTSTART = INDEXVERSIONSIZE
31
34
32 # Constant that indicates a fanout table entry hasn't been filled in. (This does
35 # Constant that indicates a fanout table entry hasn't been filled in. (This does
33 # not get serialized)
36 # not get serialized)
34 EMPTYFANOUT = -1
37 EMPTYFANOUT = -1
35
38
36 # The fanout prefix is the number of bytes that can be addressed by the fanout
39 # The fanout prefix is the number of bytes that can be addressed by the fanout
37 # table. Example: a fanout prefix of 1 means we use the first byte of a hash to
40 # table. Example: a fanout prefix of 1 means we use the first byte of a hash to
38 # look in the fanout table (which will be 2^8 entries long).
41 # look in the fanout table (which will be 2^8 entries long).
39 SMALLFANOUTPREFIX = 1
42 SMALLFANOUTPREFIX = 1
40 LARGEFANOUTPREFIX = 2
43 LARGEFANOUTPREFIX = 2
41
44
42 # The number of entries in the index at which point we switch to a large fanout.
45 # The number of entries in the index at which point we switch to a large fanout.
43 # It is chosen to balance the linear scan through a sparse fanout, with the
46 # It is chosen to balance the linear scan through a sparse fanout, with the
44 # size of the bisect in actual index.
47 # size of the bisect in actual index.
45 # 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
48 # 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
46 # bisect) with (8 step fanout scan + 1 step bisect)
49 # bisect) with (8 step fanout scan + 1 step bisect)
47 # 5 step bisect = log(2^16 / 8 / 255) # fanout
50 # 5 step bisect = log(2^16 / 8 / 255) # fanout
48 # 8 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries
51 # 8 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries
49 SMALLFANOUTCUTOFF = 2 ** 16 // 8
52 SMALLFANOUTCUTOFF = 2 ** 16 // 8
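# To make that tradeoff concrete: at the cutoff there are 2^16 / 8 = 8192
# entries. A small fanout spreads them over 256 slots (~32 entries each), so
# a lookup costs 1 fanout step plus a log2(8192 / 255) ~= 5 step bisect. A
# large fanout (2^16 slots) holds one entry per 65536 / 8192 = 8 slots, so a
# lookup scans ~8 slots before a ~1 step bisect. 8192 is roughly where the
# two costs meet.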
50
53
51 # The amount of time to wait between checking for new packs. This prevents an
54 # The amount of time to wait between checking for new packs. This prevents an
52 # exception when data is moved to a new pack after the process has already
55 # exception when data is moved to a new pack after the process has already
53 # loaded the pack list.
56 # loaded the pack list.
54 REFRESHRATE = 0.1
57 REFRESHRATE = 0.1
55
58
56 if pycompat.isposix and not pycompat.ispy3:
59 if pycompat.isposix and not pycompat.ispy3:
57 # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening.
60 # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening.
58 # The 'e' flag will be ignored on older versions of glibc.
61 # The 'e' flag will be ignored on older versions of glibc.
59 # Python 3 can't handle the 'e' flag.
62 # Python 3 can't handle the 'e' flag.
60 PACKOPENMODE = b'rbe'
63 PACKOPENMODE = b'rbe'
61 else:
64 else:
62 PACKOPENMODE = b'rb'
65 PACKOPENMODE = b'rb'
63
66
64
67
65 class _cachebackedpacks(object):
68 class _cachebackedpacks(object):
66 def __init__(self, packs, cachesize):
69 def __init__(self, packs, cachesize):
67 self._packs = set(packs)
70 self._packs = set(packs)
68 self._lrucache = util.lrucachedict(cachesize)
71 self._lrucache = util.lrucachedict(cachesize)
69 self._lastpack = None
72 self._lastpack = None
70
73
71 # Avoid cold start of the cache by populating the most recent packs
74 # Avoid cold start of the cache by populating the most recent packs
72 # in the cache.
75 # in the cache.
73 for i in reversed(range(min(cachesize, len(packs)))):
76 for i in reversed(range(min(cachesize, len(packs)))):
74 self._movetofront(packs[i])
77 self._movetofront(packs[i])
75
78
76 def _movetofront(self, pack):
79 def _movetofront(self, pack):
77 # This effectively makes pack the first entry in the cache.
80 # This effectively makes pack the first entry in the cache.
78 self._lrucache[pack] = True
81 self._lrucache[pack] = True
79
82
80 def _registerlastpackusage(self):
83 def _registerlastpackusage(self):
81 if self._lastpack is not None:
84 if self._lastpack is not None:
82 self._movetofront(self._lastpack)
85 self._movetofront(self._lastpack)
83 self._lastpack = None
86 self._lastpack = None
84
87
85 def add(self, pack):
88 def add(self, pack):
86 self._registerlastpackusage()
89 self._registerlastpackusage()
87
90
88 # This method will mostly be called when packs are not in cache.
91 # This method will mostly be called when packs are not in cache.
89 # Therefore, add the pack to the cache.
92 # Therefore, add the pack to the cache.
90 self._movetofront(pack)
93 self._movetofront(pack)
91 self._packs.add(pack)
94 self._packs.add(pack)
92
95
93 def __iter__(self):
96 def __iter__(self):
94 self._registerlastpackusage()
97 self._registerlastpackusage()
95
98
96 # Cache iteration is based on LRU.
99 # Cache iteration is based on LRU.
97 for pack in self._lrucache:
100 for pack in self._lrucache:
98 self._lastpack = pack
101 self._lastpack = pack
99 yield pack
102 yield pack
100
103
101 cachedpacks = set(pack for pack in self._lrucache)
104 cachedpacks = set(pack for pack in self._lrucache)
102 # Yield the packs not in the cache.
105 # Yield the packs not in the cache.
103 for pack in self._packs - cachedpacks:
106 for pack in self._packs - cachedpacks:
104 self._lastpack = pack
107 self._lastpack = pack
105 yield pack
108 yield pack
106
109
107 # Data not found in any pack.
110 # Data not found in any pack.
108 self._lastpack = None
111 self._lastpack = None
109
112
110
113
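# A minimal sketch of the intended access pattern (toy pack objects, and
# assuming util.lrucachedict iterates from most to least recently used,
# which the cold-start loop in __init__ relies on):
#
#   packs = _cachebackedpacks([pack1, pack2, pack3], cachesize=2)
#   for pack in packs:
#       if not pack.getmissing([somekey]):  # found in this pack
#           break                           # pack is remembered as _lastpack
#   # the next iteration (or add()) promotes that pack to the front of the
#   # LRU, so hot packs are tried first on later lookups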
111 class basepackstore(object):
114 class basepackstore(object):
112 # Default cache size limit for the pack files.
115 # Default cache size limit for the pack files.
113 DEFAULTCACHESIZE = 100
116 DEFAULTCACHESIZE = 100
114
117
115 def __init__(self, ui, path):
118 def __init__(self, ui, path):
116 self.ui = ui
119 self.ui = ui
117 self.path = path
120 self.path = path
118
121
119 # lastrefresh is 0 so we'll immediately check for new packs on the first
122 # lastrefresh is 0 so we'll immediately check for new packs on the first
120 # failure.
123 # failure.
121 self.lastrefresh = 0
124 self.lastrefresh = 0
122
125
123 packs = []
126 packs = []
124 for filepath, __, __ in self._getavailablepackfilessorted():
127 for filepath, __, __ in self._getavailablepackfilessorted():
125 try:
128 try:
126 pack = self.getpack(filepath)
129 pack = self.getpack(filepath)
127 except Exception as ex:
130 except Exception as ex:
128 # An exception may be thrown if the pack file is corrupted
131 # An exception may be thrown if the pack file is corrupted
129 # somehow. Log a warning but keep going in this case, just
132 # somehow. Log a warning but keep going in this case, just
130 # skipping this pack file.
133 # skipping this pack file.
131 #
134 #
132 # If this is an ENOENT error then don't even bother logging.
135 # If this is an ENOENT error then don't even bother logging.
133 # Someone could have removed the file since we retrieved the
136 # Someone could have removed the file since we retrieved the
134 # list of paths.
137 # list of paths.
135 if getattr(ex, 'errno', None) != errno.ENOENT:
138 if getattr(ex, 'errno', None) != errno.ENOENT:
136 ui.warn(_(b'unable to load pack %s: %s\n') % (filepath, ex))
139 ui.warn(_(b'unable to load pack %s: %s\n') % (filepath, ex))
137 continue
140 continue
138 packs.append(pack)
141 packs.append(pack)
139
142
140 self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE)
143 self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE)
141
144
142 def _getavailablepackfiles(self):
145 def _getavailablepackfiles(self):
143 """For each pack file (a index/data file combo), yields:
146 """For each pack file (a index/data file combo), yields:
144 (full path without extension, mtime, size)
147 (full path without extension, mtime, size)
145
148
146 mtime will be the mtime of the index/data file (whichever is newer)
149 mtime will be the mtime of the index/data file (whichever is newer)
147 size is the combined size of index/data file
150 size is the combined size of index/data file
148 """
151 """
149 indexsuffixlen = len(self.INDEXSUFFIX)
152 indexsuffixlen = len(self.INDEXSUFFIX)
150 packsuffixlen = len(self.PACKSUFFIX)
153 packsuffixlen = len(self.PACKSUFFIX)
151
154
152 ids = set()
155 ids = set()
153 sizes = collections.defaultdict(lambda: 0)
156 sizes = collections.defaultdict(lambda: 0)
154 mtimes = collections.defaultdict(lambda: [])
157 mtimes = collections.defaultdict(lambda: [])
155 try:
158 try:
156 for filename, type, stat in osutil.listdir(self.path, stat=True):
159 for filename, type, stat in osutil.listdir(self.path, stat=True):
157 id = None
160 id = None
158 if filename[-indexsuffixlen:] == self.INDEXSUFFIX:
161 if filename[-indexsuffixlen:] == self.INDEXSUFFIX:
159 id = filename[:-indexsuffixlen]
162 id = filename[:-indexsuffixlen]
160 elif filename[-packsuffixlen:] == self.PACKSUFFIX:
163 elif filename[-packsuffixlen:] == self.PACKSUFFIX:
161 id = filename[:-packsuffixlen]
164 id = filename[:-packsuffixlen]
162
165
163 # Since we expect to have two files corresponding to each ID
166 # Since we expect to have two files corresponding to each ID
164 # (the index file and the pack file), we can yield once we see
167 # (the index file and the pack file), we can yield once we see
165 # it twice.
168 # it twice.
166 if id:
169 if id:
167 sizes[id] += stat.st_size # Sum both files' sizes together
170 sizes[id] += stat.st_size # Sum both files' sizes together
168 mtimes[id].append(stat.st_mtime)
171 mtimes[id].append(stat.st_mtime)
169 if id in ids:
172 if id in ids:
170 yield (
173 yield (
171 os.path.join(self.path, id),
174 os.path.join(self.path, id),
172 max(mtimes[id]),
175 max(mtimes[id]),
173 sizes[id],
176 sizes[id],
174 )
177 )
175 else:
178 else:
176 ids.add(id)
179 ids.add(id)
177 except OSError as ex:
180 except OSError as ex:
178 if ex.errno != errno.ENOENT:
181 if ex.errno != errno.ENOENT:
179 raise
182 raise
180
183
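# Concretely (hypothetical id; INDEXSUFFIX/PACKSUFFIX are class attributes
# set by subclasses): once both b'4af2b8a0' + INDEXSUFFIX and
# b'4af2b8a0' + PACKSUFFIX have been seen, the generator yields exactly once:
#   (os.path.join(self.path, b'4af2b8a0'), max of both mtimes, sum of sizes)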
181 def _getavailablepackfilessorted(self):
184 def _getavailablepackfilessorted(self):
182 """Like `_getavailablepackfiles`, but also sorts the files by mtime,
185 """Like `_getavailablepackfiles`, but also sorts the files by mtime,
183 yielding newest files first.
186 yielding newest files first.
184
187
185 This is desirable, since it is more likely newer packfiles have more
188 This is desirable, since it is more likely newer packfiles have more
186 desirable data.
189 desirable data.
187 """
190 """
188 files = []
191 files = []
189 for path, mtime, size in self._getavailablepackfiles():
192 for path, mtime, size in self._getavailablepackfiles():
190 files.append((mtime, size, path))
193 files.append((mtime, size, path))
191 files = sorted(files, reverse=True)
194 files = sorted(files, reverse=True)
192 for mtime, size, path in files:
195 for mtime, size, path in files:
193 yield path, mtime, size
196 yield path, mtime, size
194
197
195 def gettotalsizeandcount(self):
198 def gettotalsizeandcount(self):
196 """Returns the total disk size (in bytes) of all the pack files in
199 """Returns the total disk size (in bytes) of all the pack files in
197 this store, and the count of pack files.
200 this store, and the count of pack files.
198
201
199 (This might be smaller than the total size of the ``self.path``
202 (This might be smaller than the total size of the ``self.path``
200 directory, since this only considers fully-written pack files, and not
203 directory, since this only considers fully-written pack files, and not
201 temporary files or other detritus in the directory.)
204 temporary files or other detritus in the directory.)
202 """
205 """
203 totalsize = 0
206 totalsize = 0
204 count = 0
207 count = 0
205 for __, __, size in self._getavailablepackfiles():
208 for __, __, size in self._getavailablepackfiles():
206 totalsize += size
209 totalsize += size
207 count += 1
210 count += 1
208 return totalsize, count
211 return totalsize, count
209
212
210 def getmetrics(self):
213 def getmetrics(self):
211 """Returns metrics on the state of this store."""
214 """Returns metrics on the state of this store."""
212 size, count = self.gettotalsizeandcount()
215 size, count = self.gettotalsizeandcount()
213 return {
216 return {
214 b'numpacks': count,
217 b'numpacks': count,
215 b'totalpacksize': size,
218 b'totalpacksize': size,
216 }
219 }
217
220
218 def getpack(self, path):
221 def getpack(self, path):
219 raise NotImplementedError()
222 raise NotImplementedError()
220
223
221 def getmissing(self, keys):
224 def getmissing(self, keys):
222 missing = keys
225 missing = keys
223 for pack in self.packs:
226 for pack in self.packs:
224 missing = pack.getmissing(missing)
227 missing = pack.getmissing(missing)
225
228
226 # Ensures better performance of the cache by keeping the most
229 # Ensures better performance of the cache by keeping the most
227 # recently accessed pack at the beginning in subsequent iterations.
230 # recently accessed pack at the beginning in subsequent iterations.
228 if not missing:
231 if not missing:
229 return missing
232 return missing
230
233
231 if missing:
234 if missing:
232 for pack in self.refresh():
235 for pack in self.refresh():
233 missing = pack.getmissing(missing)
236 missing = pack.getmissing(missing)
234
237
235 return missing
238 return missing
236
239
237 def markledger(self, ledger, options=None):
240 def markledger(self, ledger, options=None):
238 for pack in self.packs:
241 for pack in self.packs:
239 pack.markledger(ledger)
242 pack.markledger(ledger)
240
243
241 def markforrefresh(self):
244 def markforrefresh(self):
242 """Tells the store that there may be new pack files, so the next time it
245 """Tells the store that there may be new pack files, so the next time it
243 has a lookup miss it should check for new files."""
246 has a lookup miss it should check for new files."""
244 self.lastrefresh = 0
247 self.lastrefresh = 0
245
248
246 def refresh(self):
249 def refresh(self):
247 """Checks for any new packs on disk, adds them to the main pack list,
250 """Checks for any new packs on disk, adds them to the main pack list,
248 and returns a list of just the new packs."""
251 and returns a list of just the new packs."""
249 now = time.time()
252 now = time.time()
250
253
251 # If we experience a lot of misses (like in the case of getmissing() on
254 # If we experience a lot of misses (like in the case of getmissing() on
252 # new objects), let's only actually check disk for new stuff every once
255 # new objects), let's only actually check disk for new stuff every once
253 # in a while. Generally this code path should only ever matter when a
256 # in a while. Generally this code path should only ever matter when a
254 # repack is going on in the background, and it should be pretty rare
257 # repack is going on in the background, and it should be pretty rare
255 # for that to happen twice in quick succession.
258 # for that to happen twice in quick succession.
256 newpacks = []
259 newpacks = []
257 if now > self.lastrefresh + REFRESHRATE:
260 if now > self.lastrefresh + REFRESHRATE:
258 self.lastrefresh = now
261 self.lastrefresh = now
259 previous = set(p.path for p in self.packs)
262 previous = set(p.path for p in self.packs)
260 for filepath, __, __ in self._getavailablepackfilessorted():
263 for filepath, __, __ in self._getavailablepackfilessorted():
261 if filepath not in previous:
264 if filepath not in previous:
262 newpack = self.getpack(filepath)
265 newpack = self.getpack(filepath)
263 newpacks.append(newpack)
266 newpacks.append(newpack)
264 self.packs.add(newpack)
267 self.packs.add(newpack)
265
268
266 return newpacks
269 return newpacks
267
270
268
271
269 class versionmixin(object):
272 class versionmixin(object):
270 # Mix-in for classes with multiple supported versions
273 # Mix-in for classes with multiple supported versions
271 VERSION = None
274 VERSION = None
272 SUPPORTED_VERSIONS = [2]
275 SUPPORTED_VERSIONS = [2]
273
276
274 def _checkversion(self, version):
277 def _checkversion(self, version):
275 if version in self.SUPPORTED_VERSIONS:
278 if version in self.SUPPORTED_VERSIONS:
276 if self.VERSION is None:
279 if self.VERSION is None:
277 # only affect this instance
280 # only affect this instance
278 self.VERSION = version
281 self.VERSION = version
279 elif self.VERSION != version:
282 elif self.VERSION != version:
280 raise RuntimeError(b'inconsistent version: %d' % version)
283 raise RuntimeError(b'inconsistent version: %d' % version)
281 else:
284 else:
282 raise RuntimeError(b'unsupported version: %d' % version)
285 raise RuntimeError(b'unsupported version: %d' % version)
283
286
284
287
285 class basepack(versionmixin):
288 class basepack(versionmixin):
286 # The maximum amount we should read via mmap before remapping so the old
289 # The maximum amount we should read via mmap before remapping so the old
287 # pages can be released (100MB)
290 # pages can be released (100MB)
288 MAXPAGEDIN = 100 * 1024 ** 2
291 MAXPAGEDIN = 100 * 1024 ** 2
289
292
290 SUPPORTED_VERSIONS = [2]
293 SUPPORTED_VERSIONS = [2]
291
294
292 def __init__(self, path):
295 def __init__(self, path):
293 self.path = path
296 self.path = path
294 self.packpath = path + self.PACKSUFFIX
297 self.packpath = path + self.PACKSUFFIX
295 self.indexpath = path + self.INDEXSUFFIX
298 self.indexpath = path + self.INDEXSUFFIX
296
299
297 self.indexsize = os.stat(self.indexpath).st_size
300 self.indexsize = os.stat(self.indexpath).st_size
298 self.datasize = os.stat(self.packpath).st_size
301 self.datasize = os.stat(self.packpath).st_size
299
302
300 self._index = None
303 self._index = None
301 self._data = None
304 self._data = None
302 self.freememory() # initialize the mmap
305 self.freememory() # initialize the mmap
303
306
304 version = struct.unpack(b'!B', self._data[:PACKVERSIONSIZE])[0]
307 version = struct.unpack(b'!B', self._data[:PACKVERSIONSIZE])[0]
305 self._checkversion(version)
308 self._checkversion(version)
306
309
307 version, config = struct.unpack(b'!BB', self._index[:INDEXVERSIONSIZE])
310 version, config = struct.unpack(b'!BB', self._index[:INDEXVERSIONSIZE])
308 self._checkversion(version)
311 self._checkversion(version)
309
312
310 if 0b10000000 & config:
313 if 0b10000000 & config:
311 self.params = indexparams(LARGEFANOUTPREFIX, version)
314 self.params = indexparams(LARGEFANOUTPREFIX, version)
312 else:
315 else:
313 self.params = indexparams(SMALLFANOUTPREFIX, version)
316 self.params = indexparams(SMALLFANOUTPREFIX, version)
314
317
315 @util.propertycache
318 @util.propertycache
316 def _fanouttable(self):
319 def _fanouttable(self):
317 params = self.params
320 params = self.params
318 rawfanout = self._index[FANOUTSTART : FANOUTSTART + params.fanoutsize]
321 rawfanout = self._index[FANOUTSTART : FANOUTSTART + params.fanoutsize]
319 fanouttable = []
322 fanouttable = []
320 for i in pycompat.xrange(0, params.fanoutcount):
323 for i in pycompat.xrange(0, params.fanoutcount):
321 loc = i * 4
324 loc = i * 4
322 fanoutentry = struct.unpack(b'!I', rawfanout[loc : loc + 4])[0]
325 fanoutentry = struct.unpack(b'!I', rawfanout[loc : loc + 4])[0]
323 fanouttable.append(fanoutentry)
326 fanouttable.append(fanoutentry)
324 return fanouttable
327 return fanouttable
325
328
326 @util.propertycache
329 @util.propertycache
327 def _indexend(self):
330 def _indexend(self):
328 nodecount = struct.unpack_from(
331 nodecount = struct.unpack_from(
329 b'!Q', self._index, self.params.indexstart - 8
332 b'!Q', self._index, self.params.indexstart - 8
330 )[0]
333 )[0]
331 return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
334 return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
332
335
333 def freememory(self):
336 def freememory(self):
334 """Unmap and remap the memory to free it up after known expensive
337 """Unmap and remap the memory to free it up after known expensive
335 operations. Return True if self._data and self._index were reloaded.
338 operations. Return True if self._data and self._index were reloaded.
336 """
339 """
337 if self._index:
340 if self._index:
338 if self._pagedin < self.MAXPAGEDIN:
341 if self._pagedin < self.MAXPAGEDIN:
339 return False
342 return False
340
343
341 self._index.close()
344 self._index.close()
342 self._data.close()
345 self._data.close()
343
346
344 # TODO: use an opener/vfs to access these paths
347 # TODO: use an opener/vfs to access these paths
345 with open(self.indexpath, PACKOPENMODE) as indexfp:
348 with open(self.indexpath, PACKOPENMODE) as indexfp:
346 # memory-map the file, size 0 means whole file
349 # memory-map the file, size 0 means whole file
347 self._index = mmap.mmap(
350 self._index = mmap.mmap(
348 indexfp.fileno(), 0, access=mmap.ACCESS_READ
351 indexfp.fileno(), 0, access=mmap.ACCESS_READ
349 )
352 )
350 with open(self.packpath, PACKOPENMODE) as datafp:
353 with open(self.packpath, PACKOPENMODE) as datafp:
351 self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)
354 self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)
352
355
353 self._pagedin = 0
356 self._pagedin = 0
354 return True
357 return True
355
358
356 def getmissing(self, keys):
359 def getmissing(self, keys):
357 raise NotImplementedError()
360 raise NotImplementedError()
358
361
359 def markledger(self, ledger, options=None):
362 def markledger(self, ledger, options=None):
360 raise NotImplementedError()
363 raise NotImplementedError()
361
364
362 def cleanup(self, ledger):
365 def cleanup(self, ledger):
363 raise NotImplementedError()
366 raise NotImplementedError()
364
367
365 def __iter__(self):
368 def __iter__(self):
366 raise NotImplementedError()
369 raise NotImplementedError()
367
370
368 def iterentries(self):
371 def iterentries(self):
369 raise NotImplementedError()
372 raise NotImplementedError()
370
373
371
374
372 class mutablebasepack(versionmixin):
375 class mutablebasepack(versionmixin):
373 def __init__(self, ui, packdir, version=2):
376 def __init__(self, ui, packdir, version=2):
374 self._checkversion(version)
377 self._checkversion(version)
375 # TODO(augie): make this configurable
378 # TODO(augie): make this configurable
376 self._compressor = b'GZ'
379 self._compressor = b'GZ'
377 opener = vfsmod.vfs(packdir)
380 opener = vfsmod.vfs(packdir)
378 opener.createmode = 0o444
381 opener.createmode = 0o444
379 self.opener = opener
382 self.opener = opener
380
383
381 self.entries = {}
384 self.entries = {}
382
385
383 shallowutil.mkstickygroupdir(ui, packdir)
386 shallowutil.mkstickygroupdir(ui, packdir)
384 self.packfp, self.packpath = opener.mkstemp(
387 self.packfp, self.packpath = opener.mkstemp(
385 suffix=self.PACKSUFFIX + b'-tmp'
388 suffix=self.PACKSUFFIX + b'-tmp'
386 )
389 )
387 self.idxfp, self.idxpath = opener.mkstemp(
390 self.idxfp, self.idxpath = opener.mkstemp(
388 suffix=self.INDEXSUFFIX + b'-tmp'
391 suffix=self.INDEXSUFFIX + b'-tmp'
389 )
392 )
390 self.packfp = os.fdopen(self.packfp, r'wb+')
393 self.packfp = os.fdopen(self.packfp, r'wb+')
391 self.idxfp = os.fdopen(self.idxfp, r'wb+')
394 self.idxfp = os.fdopen(self.idxfp, r'wb+')
392 self.sha = hashlib.sha1()
395 self.sha = hashlib.sha1()
393 self._closed = False
396 self._closed = False
394
397
395 # The opener provides no way of doing permission fixup on files created
398 # The opener provides no way of doing permission fixup on files created
396 # via mkstemp, so we must fix it ourselves. We can probably fix this
399 # via mkstemp, so we must fix it ourselves. We can probably fix this
397 # upstream in vfs.mkstemp so we don't need to use the private method.
400 # upstream in vfs.mkstemp so we don't need to use the private method.
398 opener._fixfilemode(opener.join(self.packpath))
401 opener._fixfilemode(opener.join(self.packpath))
399 opener._fixfilemode(opener.join(self.idxpath))
402 opener._fixfilemode(opener.join(self.idxpath))
400
403
401 # Write header
404 # Write header
402 # TODO: make it extensible (ex: allow specifying compression algorithm,
405 # TODO: make it extensible (ex: allow specifying compression algorithm,
403 # a flexible key/value header, delta algorithm, fanout size, etc)
406 # a flexible key/value header, delta algorithm, fanout size, etc)
404 versionbuf = struct.pack(b'!B', self.VERSION) # unsigned 1 byte int
407 versionbuf = struct.pack(b'!B', self.VERSION) # unsigned 1 byte int
405 self.writeraw(versionbuf)
408 self.writeraw(versionbuf)
406
409
407 def __enter__(self):
410 def __enter__(self):
408 return self
411 return self
409
412
410 def __exit__(self, exc_type, exc_value, traceback):
413 def __exit__(self, exc_type, exc_value, traceback):
411 if exc_type is None:
414 if exc_type is None:
412 self.close()
415 self.close()
413 else:
416 else:
414 self.abort()
417 self.abort()
415
418
416 def abort(self):
419 def abort(self):
417 # Unclean exit
420 # Unclean exit
418 self._cleantemppacks()
421 self._cleantemppacks()
419
422
420 def writeraw(self, data):
423 def writeraw(self, data):
421 self.packfp.write(data)
424 self.packfp.write(data)
422 self.sha.update(data)
425 self.sha.update(data)
423
426
424 def close(self, ledger=None):
427 def close(self, ledger=None):
425 if self._closed:
428 if self._closed:
426 return
429 return
427
430
428 try:
431 try:
429 sha = nodemod.hex(self.sha.digest())
432 sha = nodemod.hex(self.sha.digest())
430 self.packfp.close()
433 self.packfp.close()
431 self.writeindex()
434 self.writeindex()
432
435
433 if len(self.entries) == 0:
436 if len(self.entries) == 0:
434 # Empty pack
437 # Empty pack
435 self._cleantemppacks()
438 self._cleantemppacks()
436 self._closed = True
439 self._closed = True
437 return None
440 return None
438
441
439 self.opener.rename(self.packpath, sha + self.PACKSUFFIX)
442 self.opener.rename(self.packpath, sha + self.PACKSUFFIX)
440 try:
443 try:
441 self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX)
444 self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX)
442 except Exception as ex:
445 except Exception as ex:
443 try:
446 try:
444 self.opener.unlink(sha + self.PACKSUFFIX)
447 self.opener.unlink(sha + self.PACKSUFFIX)
445 except Exception:
448 except Exception:
446 pass
449 pass
447 # Throw exception 'ex' explicitly since a normal 'raise' would
450 # Throw exception 'ex' explicitly since a normal 'raise' would
448 # potentially throw an exception from the unlink cleanup.
451 # potentially throw an exception from the unlink cleanup.
449 raise ex
452 raise ex
450 except Exception:
453 except Exception:
451 # Clean up temp packs in all exception cases
454 # Clean up temp packs in all exception cases
452 self._cleantemppacks()
455 self._cleantemppacks()
453 raise
456 raise
454
457
455 self._closed = True
458 self._closed = True
456 result = self.opener.join(sha)
459 result = self.opener.join(sha)
457 if ledger:
460 if ledger:
458 ledger.addcreated(result)
461 ledger.addcreated(result)
459 return result
462 return result
460
463
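# A sketch of the intended lifecycle (SomePackSubclass and its write calls
# are hypothetical; concrete subclasses define what gets added):
#
#   with SomePackSubclass(ui, packdir) as mp:
#       ...  # subclass-specific add()/write calls feed writeraw()
#   # a clean exit runs close(): the temp files are renamed to
#   # <sha1-of-pack-contents>.pack / .idx, giving content-addressed names;
#   # an exception runs abort(), which unlinks both temp files instead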
461 def _cleantemppacks(self):
464 def _cleantemppacks(self):
462 try:
465 try:
463 self.opener.unlink(self.packpath)
466 self.opener.unlink(self.packpath)
464 except Exception:
467 except Exception:
465 pass
468 pass
466 try:
469 try:
467 self.opener.unlink(self.idxpath)
470 self.opener.unlink(self.idxpath)
468 except Exception:
471 except Exception:
469 pass
472 pass
470
473
471 def writeindex(self):
474 def writeindex(self):
472 largefanout = len(self.entries) > SMALLFANOUTCUTOFF
475 largefanout = len(self.entries) > SMALLFANOUTCUTOFF
473 if largefanout:
476 if largefanout:
474 params = indexparams(LARGEFANOUTPREFIX, self.VERSION)
477 params = indexparams(LARGEFANOUTPREFIX, self.VERSION)
475 else:
478 else:
476 params = indexparams(SMALLFANOUTPREFIX, self.VERSION)
479 params = indexparams(SMALLFANOUTPREFIX, self.VERSION)
477
480
478 fanouttable = [EMPTYFANOUT] * params.fanoutcount
481 fanouttable = [EMPTYFANOUT] * params.fanoutcount
479
482
480 # Precompute the location of each entry
483 # Precompute the location of each entry
481 locations = {}
484 locations = {}
482 count = 0
485 count = 0
483 for node in sorted(self.entries):
486 for node in sorted(self.entries):
484 location = count * self.INDEXENTRYLENGTH
487 location = count * self.INDEXENTRYLENGTH
485 locations[node] = location
488 locations[node] = location
486 count += 1
489 count += 1
487
490
488 # Must use [0] on the unpack result since it's always a tuple.
491 # Must use [0] on the unpack result since it's always a tuple.
489 fanoutkey = struct.unpack(
492 fanoutkey = struct.unpack(
490 params.fanoutstruct, node[: params.fanoutprefix]
493 params.fanoutstruct, node[: params.fanoutprefix]
491 )[0]
494 )[0]
492 if fanouttable[fanoutkey] == EMPTYFANOUT:
495 if fanouttable[fanoutkey] == EMPTYFANOUT:
493 fanouttable[fanoutkey] = location
496 fanouttable[fanoutkey] = location
494
497
495 rawfanouttable = b''
498 rawfanouttable = b''
496 last = 0
499 last = 0
497 for offset in fanouttable:
500 for offset in fanouttable:
498 offset = offset if offset != EMPTYFANOUT else last
501 offset = offset if offset != EMPTYFANOUT else last
499 last = offset
502 last = offset
500 rawfanouttable += struct.pack(b'!I', offset)
503 rawfanouttable += struct.pack(b'!I', offset)
501
504
502 rawentrieslength = struct.pack(b'!Q', len(self.entries))
505 rawentrieslength = struct.pack(b'!Q', len(self.entries))
503
506
504 # The index offset is its location in the file, i.e. after the 2 byte
507 # The index offset is its location in the file, i.e. after the 2 byte
505 # header and the fanouttable.
508 # header and the fanouttable.
506 rawindex = self.createindex(locations, 2 + len(rawfanouttable))
509 rawindex = self.createindex(locations, 2 + len(rawfanouttable))
507
510
508 self._writeheader(params)
511 self._writeheader(params)
509 self.idxfp.write(rawfanouttable)
512 self.idxfp.write(rawfanouttable)
510 self.idxfp.write(rawentrieslength)
513 self.idxfp.write(rawentrieslength)
511 self.idxfp.write(rawindex)
514 self.idxfp.write(rawindex)
512 self.idxfp.close()
515 self.idxfp.close()
513
516
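# A toy walk-through of the fanout back-fill above (assuming 40-byte index
# entries for illustration): three sorted nodes whose first bytes are 0x00,
# 0x00 and 0x03 get locations 0, 40 and 80, so fanouttable starts as
# [0, EMPTY, EMPTY, 80, EMPTY, ...] and is serialized as
# [0, 0, 0, 80, 80, ...] -- every slot points at or before the first entry
# that could match its prefix, and readers scan forward from there.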
514 def createindex(self, nodelocations):
517 def createindex(self, nodelocations):
515 raise NotImplementedError()
518 raise NotImplementedError()
516
519
517 def _writeheader(self, indexparams):
520 def _writeheader(self, indexparams):
518 # Index header
521 # Index header
519 # <version: 1 byte>
522 # <version: 1 byte>
520 # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8
523 # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8
521 # <unused: 7 bit> # future use (compression, delta format, etc)
524 # <unused: 7 bit> # future use (compression, delta format, etc)
522 config = 0
525 config = 0
523 if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
526 if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
524 config = 0b10000000
527 config = 0b10000000
525 self.idxfp.write(struct.pack(b'!BB', self.VERSION, config))
528 self.idxfp.write(struct.pack(b'!BB', self.VERSION, config))
526
529
527
530
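# Example header bytes: struct.pack(b'!BB', 2, 0b10000000) == b'\x02\x80'
# (version 2, large fanout), while a small-fanout version 2 index starts
# with b'\x02\x00'.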
528 class indexparams(object):
531 class indexparams(object):
529 __slots__ = (
532 __slots__ = (
530 r'fanoutprefix',
533 r'fanoutprefix',
531 r'fanoutstruct',
534 r'fanoutstruct',
532 r'fanoutcount',
535 r'fanoutcount',
533 r'fanoutsize',
536 r'fanoutsize',
534 r'indexstart',
537 r'indexstart',
535 )
538 )
536
539
537 def __init__(self, prefixsize, version):
540 def __init__(self, prefixsize, version):
538 self.fanoutprefix = prefixsize
541 self.fanoutprefix = prefixsize
539
542
540 # The struct pack format for fanout table location (i.e. the format that
543 # The struct pack format for fanout table location (i.e. the format that
541 # converts the node prefix into an integer location in the fanout
544 # converts the node prefix into an integer location in the fanout
542 # table).
545 # table).
543 if prefixsize == SMALLFANOUTPREFIX:
546 if prefixsize == SMALLFANOUTPREFIX:
544 self.fanoutstruct = b'!B'
547 self.fanoutstruct = b'!B'
545 elif prefixsize == LARGEFANOUTPREFIX:
548 elif prefixsize == LARGEFANOUTPREFIX:
546 self.fanoutstruct = b'!H'
549 self.fanoutstruct = b'!H'
547 else:
550 else:
548 raise ValueError(b"invalid fanout prefix size: %s" % prefixsize)
551 raise ValueError(b"invalid fanout prefix size: %s" % prefixsize)
549
552
550 # The number of fanout table entries
553 # The number of fanout table entries
551 self.fanoutcount = 2 ** (prefixsize * 8)
554 self.fanoutcount = 2 ** (prefixsize * 8)
552
555
553 # The total bytes used by the fanout table
556 # The total bytes used by the fanout table
554 self.fanoutsize = self.fanoutcount * 4
557 self.fanoutsize = self.fanoutcount * 4
555
558
556 self.indexstart = FANOUTSTART + self.fanoutsize
559 self.indexstart = FANOUTSTART + self.fanoutsize
557 # Skip the index length
560 # Skip the index length
558 self.indexstart += 8
561 self.indexstart += 8
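Plugging the large-fanout case into the definitions above (prefixsize ==
LARGEFANOUTPREFIX == 2, FANOUTSTART == 2), the numbers work out to:

    params = indexparams(LARGEFANOUTPREFIX, 2)
    params.fanoutcount  # 2 ** 16 == 65536 slots
    params.fanoutsize   # 65536 * 4 == 262144 bytes
    params.indexstart   # 2 + 262144 + 8 == 262154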
@@ -1,387 +1,388
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import threading
3 import threading
4
4
5 from mercurial.node import hex, nullid
5 from mercurial.node import hex, nullid
6 from mercurial.pycompat import getattr
6 from mercurial import (
7 from mercurial import (
7 mdiff,
8 mdiff,
8 pycompat,
9 pycompat,
9 revlog,
10 revlog,
10 )
11 )
11 from . import (
12 from . import (
12 basestore,
13 basestore,
13 constants,
14 constants,
14 shallowutil,
15 shallowutil,
15 )
16 )
16
17
17
18
18 class ChainIndicies(object):
19 class ChainIndicies(object):
19 """A static class for easy reference to the delta chain indicies.
20 """A static class for easy reference to the delta chain indicies.
20 """
21 """
21
22
22 # The filename of this revision delta
23 # The filename of this revision delta
23 NAME = 0
24 NAME = 0
24 # The mercurial file node for this revision delta
25 # The mercurial file node for this revision delta
25 NODE = 1
26 NODE = 1
26 # The filename of the delta base's revision. This is useful when delta
27 # The filename of the delta base's revision. This is useful when delta
27 # between different files (like in the case of a move or copy, we can delta
28 # between different files (like in the case of a move or copy, we can delta
28 # against the original file content).
29 # against the original file content).
29 BASENAME = 2
30 BASENAME = 2
30 # The mercurial file node for the delta base revision. This is the nullid if
31 # The mercurial file node for the delta base revision. This is the nullid if
31 # this delta is a full text.
32 # this delta is a full text.
32 BASENODE = 3
33 BASENODE = 3
33 # The actual delta or full text data.
34 # The actual delta or full text data.
34 DATA = 4
35 DATA = 4
35
36
36
37
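# To make the tuple layout concrete (toy names and nodes; real chains come
# from getdeltachain() below), a two-entry chain and its reconstruction:
#
#   chain = [
#       (b'foo', node2, b'foo', node1, delta_2_vs_1),
#       (b'foo', node1, None, nullid, fulltext_1),  # BASENODE == nullid
#   ]
#   text = chain.pop()[ChainIndicies.DATA]  # start from the full text
#   while chain:
#       text = mdiff.patches(text, [chain.pop()[ChainIndicies.DATA]])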
37 class unioncontentstore(basestore.baseunionstore):
38 class unioncontentstore(basestore.baseunionstore):
38 def __init__(self, *args, **kwargs):
39 def __init__(self, *args, **kwargs):
39 super(unioncontentstore, self).__init__(*args, **kwargs)
40 super(unioncontentstore, self).__init__(*args, **kwargs)
40
41
41 self.stores = args
42 self.stores = args
42 self.writestore = kwargs.get(r'writestore')
43 self.writestore = kwargs.get(r'writestore')
43
44
44 # If allowincomplete==True then the union store can return partial
45 # If allowincomplete==True then the union store can return partial
45 # delta chains, otherwise it will throw a KeyError if a full
46 # delta chains, otherwise it will throw a KeyError if a full
46 # deltachain can't be found.
47 # deltachain can't be found.
47 self.allowincomplete = kwargs.get(r'allowincomplete', False)
48 self.allowincomplete = kwargs.get(r'allowincomplete', False)
48
49
49 def get(self, name, node):
50 def get(self, name, node):
50 """Fetches the full text revision contents of the given name+node pair.
51 """Fetches the full text revision contents of the given name+node pair.
51 If the full text doesn't exist, throws a KeyError.
52 If the full text doesn't exist, throws a KeyError.
52
53
53 Under the hood, this uses getdeltachain() across all the stores to build
54 Under the hood, this uses getdeltachain() across all the stores to build
54 up a full chain to produce the full text.
55 up a full chain to produce the full text.
55 """
56 """
56 chain = self.getdeltachain(name, node)
57 chain = self.getdeltachain(name, node)
57
58
58 if chain[-1][ChainIndicies.BASENODE] != nullid:
59 if chain[-1][ChainIndicies.BASENODE] != nullid:
59 # If we didn't receive a full chain, throw
60 # If we didn't receive a full chain, throw
60 raise KeyError((name, hex(node)))
61 raise KeyError((name, hex(node)))
61
62
62 # The last entry in the chain is a full text, so we start our delta
63 # The last entry in the chain is a full text, so we start our delta
63 # applies with that.
64 # applies with that.
64 fulltext = chain.pop()[ChainIndicies.DATA]
65 fulltext = chain.pop()[ChainIndicies.DATA]
65
66
66 text = fulltext
67 text = fulltext
67 while chain:
68 while chain:
68 delta = chain.pop()[ChainIndicies.DATA]
69 delta = chain.pop()[ChainIndicies.DATA]
69 text = mdiff.patches(text, [delta])
70 text = mdiff.patches(text, [delta])
70
71
71 return text
72 return text
72
73
73 @basestore.baseunionstore.retriable
74 @basestore.baseunionstore.retriable
74 def getdelta(self, name, node):
75 def getdelta(self, name, node):
75 """Return the single delta entry for the given name/node pair.
76 """Return the single delta entry for the given name/node pair.
76 """
77 """
77 for store in self.stores:
78 for store in self.stores:
78 try:
79 try:
79 return store.getdelta(name, node)
80 return store.getdelta(name, node)
80 except KeyError:
81 except KeyError:
81 pass
82 pass
82
83
83 raise KeyError((name, hex(node)))
84 raise KeyError((name, hex(node)))
84
85
85 def getdeltachain(self, name, node):
86 def getdeltachain(self, name, node):
86 """Returns the deltachain for the given name/node pair.
87 """Returns the deltachain for the given name/node pair.
87
88
88 Returns an ordered list of:
89 Returns an ordered list of:
89
90
90 [(name, node, deltabasename, deltabasenode, deltacontent),...]
91 [(name, node, deltabasename, deltabasenode, deltacontent),...]
91
92
92 where the chain is terminated by a full text entry with a nullid
93 where the chain is terminated by a full text entry with a nullid
93 deltabasenode.
94 deltabasenode.
94 """
95 """
95 chain = self._getpartialchain(name, node)
96 chain = self._getpartialchain(name, node)
96 while chain[-1][ChainIndicies.BASENODE] != nullid:
97 while chain[-1][ChainIndicies.BASENODE] != nullid:
97 x, x, deltabasename, deltabasenode, x = chain[-1]
98 x, x, deltabasename, deltabasenode, x = chain[-1]
98 try:
99 try:
99 morechain = self._getpartialchain(deltabasename, deltabasenode)
100 morechain = self._getpartialchain(deltabasename, deltabasenode)
100 chain.extend(morechain)
101 chain.extend(morechain)
101 except KeyError:
102 except KeyError:
102 # If we allow incomplete chains, don't throw.
103 # If we allow incomplete chains, don't throw.
103 if not self.allowincomplete:
104 if not self.allowincomplete:
104 raise
105 raise
105 break
106 break
106
107
107 return chain
108 return chain
108
109
109 @basestore.baseunionstore.retriable
110 @basestore.baseunionstore.retriable
110 def getmeta(self, name, node):
111 def getmeta(self, name, node):
111 """Returns the metadata dict for given node."""
112 """Returns the metadata dict for given node."""
112 for store in self.stores:
113 for store in self.stores:
113 try:
114 try:
114 return store.getmeta(name, node)
115 return store.getmeta(name, node)
115 except KeyError:
116 except KeyError:
116 pass
117 pass
117 raise KeyError((name, hex(node)))
118 raise KeyError((name, hex(node)))
118
119
119 def getmetrics(self):
120 def getmetrics(self):
120 metrics = [s.getmetrics() for s in self.stores]
121 metrics = [s.getmetrics() for s in self.stores]
121 return shallowutil.sumdicts(*metrics)
122 return shallowutil.sumdicts(*metrics)
122
123
123 @basestore.baseunionstore.retriable
124 @basestore.baseunionstore.retriable
124 def _getpartialchain(self, name, node):
125 def _getpartialchain(self, name, node):
125 """Returns a partial delta chain for the given name/node pair.
126 """Returns a partial delta chain for the given name/node pair.
126
127
127 A partial chain is a chain that may not be terminated in a full-text.
128 A partial chain is a chain that may not be terminated in a full-text.
128 """
129 """
129 for store in self.stores:
130 for store in self.stores:
130 try:
131 try:
131 return store.getdeltachain(name, node)
132 return store.getdeltachain(name, node)
132 except KeyError:
133 except KeyError:
133 pass
134 pass
134
135
135 raise KeyError((name, hex(node)))
136 raise KeyError((name, hex(node)))
136
137
137 def add(self, name, node, data):
138 def add(self, name, node, data):
138 raise RuntimeError(
139 raise RuntimeError(
139 b"cannot add content only to remotefilelog " b"contentstore"
140 b"cannot add content only to remotefilelog " b"contentstore"
140 )
141 )
141
142
142 def getmissing(self, keys):
143 def getmissing(self, keys):
143 missing = keys
144 missing = keys
144 for store in self.stores:
145 for store in self.stores:
145 if missing:
146 if missing:
146 missing = store.getmissing(missing)
147 missing = store.getmissing(missing)
147 return missing
148 return missing
148
149
149 def addremotefilelognode(self, name, node, data):
150 def addremotefilelognode(self, name, node, data):
150 if self.writestore:
151 if self.writestore:
151 self.writestore.addremotefilelognode(name, node, data)
152 self.writestore.addremotefilelognode(name, node, data)
152 else:
153 else:
153 raise RuntimeError(b"no writable store configured")
154 raise RuntimeError(b"no writable store configured")
154
155
155 def markledger(self, ledger, options=None):
156 def markledger(self, ledger, options=None):
156 for store in self.stores:
157 for store in self.stores:
157 store.markledger(ledger, options)
158 store.markledger(ledger, options)
158
159
159
160
160 class remotefilelogcontentstore(basestore.basestore):
161 class remotefilelogcontentstore(basestore.basestore):
161 def __init__(self, *args, **kwargs):
162 def __init__(self, *args, **kwargs):
162 super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
163 super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
163 self._threaddata = threading.local()
164 self._threaddata = threading.local()
164
165
165 def get(self, name, node):
166 def get(self, name, node):
166 # return raw revision text
167 # return raw revision text
167 data = self._getdata(name, node)
168 data = self._getdata(name, node)
168
169
169 offset, size, flags = shallowutil.parsesizeflags(data)
170 offset, size, flags = shallowutil.parsesizeflags(data)
170 content = data[offset : offset + size]
171 content = data[offset : offset + size]
171
172
172 ancestormap = shallowutil.ancestormap(data)
173 ancestormap = shallowutil.ancestormap(data)
173 p1, p2, linknode, copyfrom = ancestormap[node]
174 p1, p2, linknode, copyfrom = ancestormap[node]
174 copyrev = None
175 copyrev = None
175 if copyfrom:
176 if copyfrom:
176 copyrev = hex(p1)
177 copyrev = hex(p1)
177
178
178 self._updatemetacache(node, size, flags)
179 self._updatemetacache(node, size, flags)
179
180
180 # lfs tracks renames in its own metadata, remove hg copy metadata,
181 # lfs tracks renames in its own metadata, remove hg copy metadata,
181 # because copy metadata will be re-added by lfs flag processor.
182 # because copy metadata will be re-added by lfs flag processor.
182 if flags & revlog.REVIDX_EXTSTORED:
183 if flags & revlog.REVIDX_EXTSTORED:
183 copyrev = copyfrom = None
184 copyrev = copyfrom = None
184 revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
185 revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
185 return revision
186 return revision
186
187
187 def getdelta(self, name, node):
188 def getdelta(self, name, node):
188 # Since remotefilelog content stores only contain full texts, just
189 # Since remotefilelog content stores only contain full texts, just
189 # return that.
190 # return that.
190 revision = self.get(name, node)
191 revision = self.get(name, node)
191 return revision, name, nullid, self.getmeta(name, node)
192 return revision, name, nullid, self.getmeta(name, node)
192
193
193 def getdeltachain(self, name, node):
194 def getdeltachain(self, name, node):
194 # Since remotefilelog content stores just contain full texts, we return
195 # Since remotefilelog content stores just contain full texts, we return
195 # a fake delta chain that just consists of a single full text revision.
196 # a fake delta chain that just consists of a single full text revision.
196 # The nullid in the deltabasenode slot indicates that the revision is a
197 # The nullid in the deltabasenode slot indicates that the revision is a
197 # fulltext.
198 # fulltext.
198 revision = self.get(name, node)
199 revision = self.get(name, node)
199 return [(name, node, None, nullid, revision)]
200 return [(name, node, None, nullid, revision)]
200
201
201 def getmeta(self, name, node):
202 def getmeta(self, name, node):
202 self._sanitizemetacache()
203 self._sanitizemetacache()
203 if node != self._threaddata.metacache[0]:
204 if node != self._threaddata.metacache[0]:
204 data = self._getdata(name, node)
205 data = self._getdata(name, node)
205 offset, size, flags = shallowutil.parsesizeflags(data)
206 offset, size, flags = shallowutil.parsesizeflags(data)
206 self._updatemetacache(node, size, flags)
207 self._updatemetacache(node, size, flags)
207 return self._threaddata.metacache[1]
208 return self._threaddata.metacache[1]
208
209
209 def add(self, name, node, data):
210 def add(self, name, node, data):
210 raise RuntimeError(
211 raise RuntimeError(
211 b"cannot add content only to remotefilelog " b"contentstore"
212 b"cannot add content only to remotefilelog " b"contentstore"
212 )
213 )
213
214
214 def _sanitizemetacache(self):
215 def _sanitizemetacache(self):
215 metacache = getattr(self._threaddata, 'metacache', None)
216 metacache = getattr(self._threaddata, 'metacache', None)
216 if metacache is None:
217 if metacache is None:
217 self._threaddata.metacache = (None, None) # (node, meta)
218 self._threaddata.metacache = (None, None) # (node, meta)
218
219
219 def _updatemetacache(self, node, size, flags):
220 def _updatemetacache(self, node, size, flags):
220 self._sanitizemetacache()
221 self._sanitizemetacache()
221 if node == self._threaddata.metacache[0]:
222 if node == self._threaddata.metacache[0]:
222 return
223 return
223 meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
224 meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
224 self._threaddata.metacache = (node, meta)
225 self._threaddata.metacache = (node, meta)
225
226
226
227
227 class remotecontentstore(object):
228 class remotecontentstore(object):
228 def __init__(self, ui, fileservice, shared):
229 def __init__(self, ui, fileservice, shared):
229 self._fileservice = fileservice
230 self._fileservice = fileservice
230 # type(shared) is usually remotefilelogcontentstore
231 # type(shared) is usually remotefilelogcontentstore
231 self._shared = shared
232 self._shared = shared
232
233
233 def get(self, name, node):
234 def get(self, name, node):
234 self._fileservice.prefetch(
235 self._fileservice.prefetch(
235 [(name, hex(node))], force=True, fetchdata=True
236 [(name, hex(node))], force=True, fetchdata=True
236 )
237 )
237 return self._shared.get(name, node)
238 return self._shared.get(name, node)
238
239
239 def getdelta(self, name, node):
240 def getdelta(self, name, node):
240 revision = self.get(name, node)
241 revision = self.get(name, node)
241 return revision, name, nullid, self._shared.getmeta(name, node)
242 return revision, name, nullid, self._shared.getmeta(name, node)
242
243
243 def getdeltachain(self, name, node):
244 def getdeltachain(self, name, node):
244 # Since our remote content stores just contain full texts, we return a
245 # Since our remote content stores just contain full texts, we return a
245 # fake delta chain that just consists of a single full text revision.
246 # fake delta chain that just consists of a single full text revision.
246 # The nullid in the deltabasenode slot indicates that the revision is a
247 # The nullid in the deltabasenode slot indicates that the revision is a
247 # fulltext.
248 # fulltext.
248 revision = self.get(name, node)
249 revision = self.get(name, node)
249 return [(name, node, None, nullid, revision)]
250 return [(name, node, None, nullid, revision)]
250
251
251 def getmeta(self, name, node):
252 def getmeta(self, name, node):
252 self._fileservice.prefetch(
253 self._fileservice.prefetch(
253 [(name, hex(node))], force=True, fetchdata=True
254 [(name, hex(node))], force=True, fetchdata=True
254 )
255 )
255 return self._shared.getmeta(name, node)
256 return self._shared.getmeta(name, node)
256
257
257 def add(self, name, node, data):
258 def add(self, name, node, data):
258 raise RuntimeError(b"cannot add to a remote store")
259 raise RuntimeError(b"cannot add to a remote store")
259
260
260 def getmissing(self, keys):
261 def getmissing(self, keys):
261 return keys
262 return keys
262
263
263 def markledger(self, ledger, options=None):
264 def markledger(self, ledger, options=None):
264 pass
265 pass
265
266
266
267
267 class manifestrevlogstore(object):
268 class manifestrevlogstore(object):
268 def __init__(self, repo):
269 def __init__(self, repo):
269 self._store = repo.store
270 self._store = repo.store
270 self._svfs = repo.svfs
271 self._svfs = repo.svfs
271 self._revlogs = dict()
272 self._revlogs = dict()
272 self._cl = revlog.revlog(self._svfs, b'00changelog.i')
273 self._cl = revlog.revlog(self._svfs, b'00changelog.i')
273 self._repackstartlinkrev = 0
274 self._repackstartlinkrev = 0
274
275
275 def get(self, name, node):
276 def get(self, name, node):
276 return self._revlog(name).rawdata(node)
277 return self._revlog(name).rawdata(node)
277
278
278 def getdelta(self, name, node):
279 def getdelta(self, name, node):
279 revision = self.get(name, node)
280 revision = self.get(name, node)
280 return revision, name, nullid, self.getmeta(name, node)
281 return revision, name, nullid, self.getmeta(name, node)
281
282
282 def getdeltachain(self, name, node):
283 def getdeltachain(self, name, node):
283 revision = self.get(name, node)
284 revision = self.get(name, node)
284 return [(name, node, None, nullid, revision)]
285 return [(name, node, None, nullid, revision)]
285
286
286 def getmeta(self, name, node):
287 def getmeta(self, name, node):
287 rl = self._revlog(name)
288 rl = self._revlog(name)
288 rev = rl.rev(node)
289 rev = rl.rev(node)
289 return {
290 return {
290 constants.METAKEYFLAG: rl.flags(rev),
291 constants.METAKEYFLAG: rl.flags(rev),
291 constants.METAKEYSIZE: rl.rawsize(rev),
292 constants.METAKEYSIZE: rl.rawsize(rev),
292 }
293 }
293
294
294 def getancestors(self, name, node, known=None):
295 def getancestors(self, name, node, known=None):
295 if known is None:
296 if known is None:
296 known = set()
297 known = set()
297 if node in known:
298 if node in known:
298 return []
299 return []
299
300
300 rl = self._revlog(name)
301 rl = self._revlog(name)
301 ancestors = {}
302 ancestors = {}
302 missing = set((node,))
303 missing = set((node,))
303 for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
304 for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
304 ancnode = rl.node(ancrev)
305 ancnode = rl.node(ancrev)
305 missing.discard(ancnode)
306 missing.discard(ancnode)
306
307
307 p1, p2 = rl.parents(ancnode)
308 p1, p2 = rl.parents(ancnode)
308 if p1 != nullid and p1 not in known:
309 if p1 != nullid and p1 not in known:
309 missing.add(p1)
310 missing.add(p1)
310 if p2 != nullid and p2 not in known:
311 if p2 != nullid and p2 not in known:
311 missing.add(p2)
                    missing.add(p2)

                linknode = self._cl.node(rl.linkrev(ancrev))
                ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'')
            if not missing:
                break
        return ancestors

    def getnodeinfo(self, name, node):
        cl = self._cl
        rl = self._revlog(name)
        parents = rl.parents(node)
        linkrev = rl.linkrev(rl.rev(node))
        return (parents[0], parents[1], cl.node(linkrev), None)

    def add(self, *args):
        raise RuntimeError(b"cannot add to a revlog store")

    def _revlog(self, name):
        rl = self._revlogs.get(name)
        if rl is None:
            revlogname = b'00manifesttree.i'
            if name != b'':
                revlogname = b'meta/%s/00manifest.i' % name
            rl = revlog.revlog(self._svfs, revlogname)
            self._revlogs[name] = rl
        return rl

    def getmissing(self, keys):
        missing = []
        for name, node in keys:
            mfrevlog = self._revlog(name)
            if node not in mfrevlog.nodemap:
                missing.append((name, node))

        return missing

    def setrepacklinkrevrange(self, startrev, endrev):
        self._repackstartlinkrev = startrev
        self._repackendlinkrev = endrev

    def markledger(self, ledger, options=None):
        if options and options.get(constants.OPTION_PACKSONLY):
            return
        treename = b''
        rl = revlog.revlog(self._svfs, b'00manifesttree.i')
        startlinkrev = self._repackstartlinkrev
        endlinkrev = self._repackendlinkrev
        for rev in pycompat.xrange(len(rl) - 1, -1, -1):
            linkrev = rl.linkrev(rev)
            if linkrev < startlinkrev:
                break
            if linkrev > endlinkrev:
                continue
            node = rl.node(rev)
            ledger.markdataentry(self, treename, node)
            ledger.markhistoryentry(self, treename, node)

        for path, encoded, size in self._store.datafiles():
            if path[:5] != b'meta/' or path[-2:] != b'.i':
                continue

            treename = path[5 : -len(b'/00manifest.i')]

            rl = revlog.revlog(self._svfs, path)
            for rev in pycompat.xrange(len(rl) - 1, -1, -1):
                linkrev = rl.linkrev(rev)
                if linkrev < startlinkrev:
                    break
                if linkrev > endlinkrev:
                    continue
                node = rl.node(rev)
                ledger.markdataentry(self, treename, node)
                ledger.markhistoryentry(self, treename, node)

    def cleanup(self, ledger):
        pass
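
For orientation, a minimal usage sketch of the revlog-backed manifest store above (not part of this commit; `store` is assumed to be an instance of this class, and the node values are hypothetical placeholders). As `_revlog` shows, keys are `(treename, node)` pairs: the empty name selects the root tree manifest (`00manifesttree.i`), any other name maps to `meta/<name>/00manifest.i`.

# Hypothetical sketch: probing the store for manifest nodes that are
# absent from the local revlogs before requesting them from a remote.
rootmanifestnode = b'\x00' * 20  # placeholder 20-byte node id
submanifestnode = b'\x01' * 20   # placeholder 20-byte node id
keys = [
    (b'', rootmanifestnode),        # root tree manifest (00manifesttree.i)
    (b'foo/bar', submanifestnode),  # subtree manifest (meta/foo/bar/00manifest.i)
]
missing = store.getmissing(keys)  # subset of keys not stored locally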
@@ -1,291 +1,292
1 """strip changesets and their descendants from history
1 """strip changesets and their descendants from history
2
2
3 This extension allows you to strip changesets and all their descendants from the
3 This extension allows you to strip changesets and all their descendants from the
4 repository. See the command help for details.
4 repository. See the command help for details.
5 """
5 """
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 from mercurial.i18n import _
8 from mercurial.i18n import _
9 from mercurial.pycompat import getattr
9 from mercurial import (
10 from mercurial import (
10 bookmarks as bookmarksmod,
11 bookmarks as bookmarksmod,
11 cmdutil,
12 cmdutil,
12 error,
13 error,
13 hg,
14 hg,
14 lock as lockmod,
15 lock as lockmod,
15 merge,
16 merge,
16 node as nodemod,
17 node as nodemod,
17 pycompat,
18 pycompat,
18 registrar,
19 registrar,
19 repair,
20 repair,
20 scmutil,
21 scmutil,
21 util,
22 util,
22 )
23 )
23
24
24 nullid = nodemod.nullid
25 nullid = nodemod.nullid
25 release = lockmod.release
26 release = lockmod.release
26
27
27 cmdtable = {}
28 cmdtable = {}
28 command = registrar.command(cmdtable)
29 command = registrar.command(cmdtable)
29 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
30 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
31 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
31 # be specifying the version(s) of Mercurial they are tested with, or
32 # be specifying the version(s) of Mercurial they are tested with, or
32 # leave the attribute unspecified.
33 # leave the attribute unspecified.
33 testedwith = b'ships-with-hg-core'
34 testedwith = b'ships-with-hg-core'
34
35
35
36
36 def checklocalchanges(repo, force=False):
37 def checklocalchanges(repo, force=False):
37 s = repo.status()
38 s = repo.status()
38 if not force:
39 if not force:
39 cmdutil.checkunfinished(repo)
40 cmdutil.checkunfinished(repo)
40 cmdutil.bailifchanged(repo)
41 cmdutil.bailifchanged(repo)
41 else:
42 else:
42 cmdutil.checkunfinished(repo, skipmerge=True)
43 cmdutil.checkunfinished(repo, skipmerge=True)
43 return s
44 return s
44
45
45
46
46 def _findupdatetarget(repo, nodes):
47 def _findupdatetarget(repo, nodes):
47 unode, p2 = repo.changelog.parents(nodes[0])
48 unode, p2 = repo.changelog.parents(nodes[0])
48 currentbranch = repo[None].branch()
49 currentbranch = repo[None].branch()
49
50
50 if (
51 if (
51 util.safehasattr(repo, b'mq')
52 util.safehasattr(repo, b'mq')
52 and p2 != nullid
53 and p2 != nullid
53 and p2 in [x.node for x in repo.mq.applied]
54 and p2 in [x.node for x in repo.mq.applied]
54 ):
55 ):
55 unode = p2
56 unode = p2
56 elif currentbranch != repo[unode].branch():
57 elif currentbranch != repo[unode].branch():
57 pwdir = b'parents(wdir())'
58 pwdir = b'parents(wdir())'
58 revset = b'max(((parents(%ln::%r) + %r) - %ln::%r) and branch(%s))'
59 revset = b'max(((parents(%ln::%r) + %r) - %ln::%r) and branch(%s))'
59 branchtarget = repo.revs(
60 branchtarget = repo.revs(
60 revset, nodes, pwdir, pwdir, nodes, pwdir, currentbranch
61 revset, nodes, pwdir, pwdir, nodes, pwdir, currentbranch
61 )
62 )
62 if branchtarget:
63 if branchtarget:
63 cl = repo.changelog
64 cl = repo.changelog
64 unode = cl.node(branchtarget.first())
65 unode = cl.node(branchtarget.first())
65
66
66 return unode
67 return unode
67
68
68
69
69 def strip(
70 def strip(
70 ui,
71 ui,
71 repo,
72 repo,
72 revs,
73 revs,
73 update=True,
74 update=True,
74 backup=True,
75 backup=True,
75 force=None,
76 force=None,
76 bookmarks=None,
77 bookmarks=None,
77 soft=False,
78 soft=False,
78 ):
79 ):
79 with repo.wlock(), repo.lock():
80 with repo.wlock(), repo.lock():
80
81
81 if update:
82 if update:
82 checklocalchanges(repo, force=force)
83 checklocalchanges(repo, force=force)
83 urev = _findupdatetarget(repo, revs)
84 urev = _findupdatetarget(repo, revs)
84 hg.clean(repo, urev)
85 hg.clean(repo, urev)
85 repo.dirstate.write(repo.currenttransaction())
86 repo.dirstate.write(repo.currenttransaction())
86
87
87 if soft:
88 if soft:
88 repair.softstrip(ui, repo, revs, backup)
89 repair.softstrip(ui, repo, revs, backup)
89 else:
90 else:
90 repair.strip(ui, repo, revs, backup)
91 repair.strip(ui, repo, revs, backup)
91
92
92 repomarks = repo._bookmarks
93 repomarks = repo._bookmarks
93 if bookmarks:
94 if bookmarks:
94 with repo.transaction(b'strip') as tr:
95 with repo.transaction(b'strip') as tr:
95 if repo._activebookmark in bookmarks:
96 if repo._activebookmark in bookmarks:
96 bookmarksmod.deactivate(repo)
97 bookmarksmod.deactivate(repo)
97 repomarks.applychanges(repo, tr, [(b, None) for b in bookmarks])
98 repomarks.applychanges(repo, tr, [(b, None) for b in bookmarks])
98 for bookmark in sorted(bookmarks):
99 for bookmark in sorted(bookmarks):
99 ui.write(_(b"bookmark '%s' deleted\n") % bookmark)
100 ui.write(_(b"bookmark '%s' deleted\n") % bookmark)
100
101
101
102
102 @command(
103 @command(
103 b"strip",
104 b"strip",
104 [
105 [
105 (
106 (
106 b'r',
107 b'r',
107 b'rev',
108 b'rev',
108 [],
109 [],
109 _(
110 _(
110 b'strip specified revision (optional, '
111 b'strip specified revision (optional, '
111 b'can specify revisions without this '
112 b'can specify revisions without this '
112 b'option)'
113 b'option)'
113 ),
114 ),
114 _(b'REV'),
115 _(b'REV'),
115 ),
116 ),
116 (
117 (
117 b'f',
118 b'f',
118 b'force',
119 b'force',
119 None,
120 None,
120 _(
121 _(
121 b'force removal of changesets, discard '
122 b'force removal of changesets, discard '
122 b'uncommitted changes (no backup)'
123 b'uncommitted changes (no backup)'
123 ),
124 ),
124 ),
125 ),
125 (b'', b'no-backup', None, _(b'do not save backup bundle')),
126 (b'', b'no-backup', None, _(b'do not save backup bundle')),
126 (
127 (
127 b'',
128 b'',
128 b'nobackup',
129 b'nobackup',
129 None,
130 None,
130 _(b'do not save backup bundle ' b'(DEPRECATED)'),
131 _(b'do not save backup bundle ' b'(DEPRECATED)'),
131 ),
132 ),
132 (b'n', b'', None, _(b'ignored (DEPRECATED)')),
133 (b'n', b'', None, _(b'ignored (DEPRECATED)')),
133 (
134 (
134 b'k',
135 b'k',
135 b'keep',
136 b'keep',
136 None,
137 None,
137 _(b"do not modify working directory during " b"strip"),
138 _(b"do not modify working directory during " b"strip"),
138 ),
139 ),
139 (
140 (
140 b'B',
141 b'B',
141 b'bookmark',
142 b'bookmark',
142 [],
143 [],
143 _(b"remove revs only reachable from given" b" bookmark"),
144 _(b"remove revs only reachable from given" b" bookmark"),
144 _(b'BOOKMARK'),
145 _(b'BOOKMARK'),
145 ),
146 ),
146 (
147 (
147 b'',
148 b'',
148 b'soft',
149 b'soft',
149 None,
150 None,
150 _(b"simply drop changesets from visible history (EXPERIMENTAL)"),
151 _(b"simply drop changesets from visible history (EXPERIMENTAL)"),
151 ),
152 ),
152 ],
153 ],
153 _(b'hg strip [-k] [-f] [-B bookmark] [-r] REV...'),
154 _(b'hg strip [-k] [-f] [-B bookmark] [-r] REV...'),
154 helpcategory=command.CATEGORY_MAINTENANCE,
155 helpcategory=command.CATEGORY_MAINTENANCE,
155 )
156 )
156 def stripcmd(ui, repo, *revs, **opts):
157 def stripcmd(ui, repo, *revs, **opts):
157 """strip changesets and all their descendants from the repository
158 """strip changesets and all their descendants from the repository
158
159
159 The strip command removes the specified changesets and all their
160 The strip command removes the specified changesets and all their
160 descendants. If the working directory has uncommitted changes, the
161 descendants. If the working directory has uncommitted changes, the
161 operation is aborted unless the --force flag is supplied, in which
162 operation is aborted unless the --force flag is supplied, in which
162 case changes will be discarded.
163 case changes will be discarded.
163
164
164 If a parent of the working directory is stripped, then the working
165 If a parent of the working directory is stripped, then the working
165 directory will automatically be updated to the most recent
166 directory will automatically be updated to the most recent
166 available ancestor of the stripped parent after the operation
167 available ancestor of the stripped parent after the operation
167 completes.
168 completes.
168
169
169 Any stripped changesets are stored in ``.hg/strip-backup`` as a
170 Any stripped changesets are stored in ``.hg/strip-backup`` as a
170 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
171 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
171 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
172 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
172 where BUNDLE is the bundle file created by the strip. Note that
173 where BUNDLE is the bundle file created by the strip. Note that
173 the local revision numbers will in general be different after the
174 the local revision numbers will in general be different after the
174 restore.
175 restore.
175
176
176 Use the --no-backup option to discard the backup bundle once the
177 Use the --no-backup option to discard the backup bundle once the
177 operation completes.
178 operation completes.
178
179
179 Strip is not a history-rewriting operation and can be used on
180 Strip is not a history-rewriting operation and can be used on
180 changesets in the public phase. But if the stripped changesets have
181 changesets in the public phase. But if the stripped changesets have
181 been pushed to a remote repository you will likely pull them again.
182 been pushed to a remote repository you will likely pull them again.
182
183
183 Return 0 on success.
184 Return 0 on success.
184 """
185 """
185 opts = pycompat.byteskwargs(opts)
186 opts = pycompat.byteskwargs(opts)
186 backup = True
187 backup = True
187 if opts.get(b'no_backup') or opts.get(b'nobackup'):
188 if opts.get(b'no_backup') or opts.get(b'nobackup'):
188 backup = False
189 backup = False
189
190
190 cl = repo.changelog
191 cl = repo.changelog
191 revs = list(revs) + opts.get(b'rev')
192 revs = list(revs) + opts.get(b'rev')
192 revs = set(scmutil.revrange(repo, revs))
193 revs = set(scmutil.revrange(repo, revs))
193
194
194 with repo.wlock():
195 with repo.wlock():
195 bookmarks = set(opts.get(b'bookmark'))
196 bookmarks = set(opts.get(b'bookmark'))
196 if bookmarks:
197 if bookmarks:
197 repomarks = repo._bookmarks
198 repomarks = repo._bookmarks
198 if not bookmarks.issubset(repomarks):
199 if not bookmarks.issubset(repomarks):
199 raise error.Abort(
200 raise error.Abort(
200 _(b"bookmark '%s' not found")
201 _(b"bookmark '%s' not found")
201 % b','.join(sorted(bookmarks - set(repomarks.keys())))
202 % b','.join(sorted(bookmarks - set(repomarks.keys())))
202 )
203 )
203
204
204 # If the requested bookmark is not the only one pointing to a
205 # If the requested bookmark is not the only one pointing to a
205 # a revision we have to only delete the bookmark and not strip
206 # a revision we have to only delete the bookmark and not strip
206 # anything. revsets cannot detect that case.
207 # anything. revsets cannot detect that case.
207 nodetobookmarks = {}
208 nodetobookmarks = {}
208 for mark, node in repomarks.iteritems():
209 for mark, node in repomarks.iteritems():
209 nodetobookmarks.setdefault(node, []).append(mark)
210 nodetobookmarks.setdefault(node, []).append(mark)
210 for marks in nodetobookmarks.values():
211 for marks in nodetobookmarks.values():
211 if bookmarks.issuperset(marks):
212 if bookmarks.issuperset(marks):
212 rsrevs = scmutil.bookmarkrevs(repo, marks[0])
213 rsrevs = scmutil.bookmarkrevs(repo, marks[0])
213 revs.update(set(rsrevs))
214 revs.update(set(rsrevs))
214 if not revs:
215 if not revs:
215 with repo.lock(), repo.transaction(b'bookmark') as tr:
216 with repo.lock(), repo.transaction(b'bookmark') as tr:
216 bmchanges = [(b, None) for b in bookmarks]
217 bmchanges = [(b, None) for b in bookmarks]
217 repomarks.applychanges(repo, tr, bmchanges)
218 repomarks.applychanges(repo, tr, bmchanges)
218 for bookmark in sorted(bookmarks):
219 for bookmark in sorted(bookmarks):
219 ui.write(_(b"bookmark '%s' deleted\n") % bookmark)
220 ui.write(_(b"bookmark '%s' deleted\n") % bookmark)
220
221
221 if not revs:
222 if not revs:
222 raise error.Abort(_(b'empty revision set'))
223 raise error.Abort(_(b'empty revision set'))
223
224
224 descendants = set(cl.descendants(revs))
225 descendants = set(cl.descendants(revs))
225 strippedrevs = revs.union(descendants)
226 strippedrevs = revs.union(descendants)
226 roots = revs.difference(descendants)
227 roots = revs.difference(descendants)
227
228
228 # if one of the wdir parent is stripped we'll need
229 # if one of the wdir parent is stripped we'll need
229 # to update away to an earlier revision
230 # to update away to an earlier revision
230 update = any(
231 update = any(
231 p != nullid and cl.rev(p) in strippedrevs
232 p != nullid and cl.rev(p) in strippedrevs
232 for p in repo.dirstate.parents()
233 for p in repo.dirstate.parents()
233 )
234 )
234
235
235 rootnodes = set(cl.node(r) for r in roots)
236 rootnodes = set(cl.node(r) for r in roots)
236
237
237 q = getattr(repo, 'mq', None)
238 q = getattr(repo, 'mq', None)
238 if q is not None and q.applied:
239 if q is not None and q.applied:
239 # refresh queue state if we're about to strip
240 # refresh queue state if we're about to strip
240 # applied patches
241 # applied patches
241 if cl.rev(repo.lookup(b'qtip')) in strippedrevs:
242 if cl.rev(repo.lookup(b'qtip')) in strippedrevs:
242 q.applieddirty = True
243 q.applieddirty = True
243 start = 0
244 start = 0
244 end = len(q.applied)
245 end = len(q.applied)
245 for i, statusentry in enumerate(q.applied):
246 for i, statusentry in enumerate(q.applied):
246 if statusentry.node in rootnodes:
247 if statusentry.node in rootnodes:
247 # if one of the stripped roots is an applied
248 # if one of the stripped roots is an applied
248 # patch, only part of the queue is stripped
249 # patch, only part of the queue is stripped
249 start = i
250 start = i
250 break
251 break
251 del q.applied[start:end]
252 del q.applied[start:end]
252 q.savedirty()
253 q.savedirty()
253
254
254 revs = sorted(rootnodes)
255 revs = sorted(rootnodes)
255 if update and opts.get(b'keep'):
256 if update and opts.get(b'keep'):
256 urev = _findupdatetarget(repo, revs)
257 urev = _findupdatetarget(repo, revs)
257 uctx = repo[urev]
258 uctx = repo[urev]
258
259
259 # only reset the dirstate for files that would actually change
260 # only reset the dirstate for files that would actually change
260 # between the working context and uctx
261 # between the working context and uctx
261 descendantrevs = repo.revs(b"%d::.", uctx.rev())
262 descendantrevs = repo.revs(b"%d::.", uctx.rev())
262 changedfiles = []
263 changedfiles = []
263 for rev in descendantrevs:
264 for rev in descendantrevs:
264 # blindly reset the files, regardless of what actually changed
265 # blindly reset the files, regardless of what actually changed
265 changedfiles.extend(repo[rev].files())
266 changedfiles.extend(repo[rev].files())
266
267
267 # reset files that only changed in the dirstate too
268 # reset files that only changed in the dirstate too
268 dirstate = repo.dirstate
269 dirstate = repo.dirstate
269 dirchanges = [f for f in dirstate if dirstate[f] != b'n']
270 dirchanges = [f for f in dirstate if dirstate[f] != b'n']
270 changedfiles.extend(dirchanges)
271 changedfiles.extend(dirchanges)
271
272
272 repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
273 repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
273 repo.dirstate.write(repo.currenttransaction())
274 repo.dirstate.write(repo.currenttransaction())
274
275
275 # clear resolve state
276 # clear resolve state
276 merge.mergestate.clean(repo, repo[b'.'].node())
277 merge.mergestate.clean(repo, repo[b'.'].node())
277
278
278 update = False
279 update = False
279
280
280 strip(
281 strip(
281 ui,
282 ui,
282 repo,
283 repo,
283 revs,
284 revs,
284 backup=backup,
285 backup=backup,
285 update=update,
286 update=update,
286 force=opts.get(b'force'),
287 force=opts.get(b'force'),
287 bookmarks=bookmarks,
288 bookmarks=bookmarks,
288 soft=opts[b'soft'],
289 soft=opts[b'soft'],
289 )
290 )
290
291
291 return 0
292 return 0
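
A usage note (not part of the diff): other extensions can call the `strip()` helper above directly rather than going through the command. A minimal sketch, assuming `ui` and `repo` are the usual objects handed to any Mercurial command function and `rev` is a revision identifier; the wrapper name is hypothetical.

from hgext.strip import strip

def removewithdescendants(ui, repo, rev):
    # Hypothetical helper: strip() expects a list of nodes, takes the
    # wlock/lock itself, and writes a backup bundle to .hg/strip-backup
    # when backup=True, so the operation can be undone with 'hg unbundle'.
    strip(ui, repo, [repo[rev].node()], update=True, backup=True)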
NO CONTENT: the remaining modified files in this commit are too big and their content was truncated.