##// END OF EJS Templates
py3: use pycompat.getcwd() instead of os.getcwd()...
Pulkit Goyal -
r30519:20a42325 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,296 +1,297 @@
1 1 # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 import errno
10 10 import os
11 11 import re
12 12 import socket
13 13
14 14 from mercurial.i18n import _
15 15 from mercurial import (
16 16 encoding,
17 17 error,
18 pycompat,
18 19 util,
19 20 )
20 21
21 22 from . import (
22 23 common,
23 24 cvsps,
24 25 )
25 26
# In-memory file object used to buffer file data read from the CVS server.
stringio = util.stringio

# Helpers shared by all converter backends.
checktool = common.checktool
commit = common.commit
converter_source = common.converter_source
makedatetimestamp = common.makedatetimestamp
NoRepo = common.NoRepo
32 33
class convert_cvs(converter_source):
    """Converter source reading history from a checked-out CVS tree.

    The checkout's CVS/Root and CVS/Repository files identify the
    server; changesets are assembled from the raw CVS log by cvsps.
    """
    def __init__(self, ui, path, revs=None):
        super(convert_cvs, self).__init__(ui, path, revs=revs)

        cvs = os.path.join(path, "CVS")
        if not os.path.exists(cvs):
            raise NoRepo(_("%s does not look like a CVS checkout") % path)

        checktool('cvs')

        self.changeset = None
        self.files = {}
        self.tags = {}
        self.lastbranch = {}
        self.socket = None
        # Both control files end with a newline, hence the [:-1].  Use
        # context managers so the descriptors are not leaked (the
        # original open(...).read() never closed them).
        with open(os.path.join(cvs, "Root")) as fp:
            self.cvsroot = fp.read()[:-1]
        with open(os.path.join(cvs, "Repository")) as fp:
            self.cvsrepo = fp.read()[:-1]
        self.encoding = encoding.encoding

        self._connect()

    def _parse(self):
        """Populate self.changeset/self.files/self.tags from the CVS log.

        Idempotent: does nothing once self.changeset is filled in.
        """
        if self.changeset is not None:
            return
        self.changeset = {}

        maxrev = 0
        if self.revs:
            if len(self.revs) > 1:
                raise error.Abort(_('cvs source does not support specifying '
                                    'multiple revs'))
            # TODO: handle tags
            try:
                # patchset number?
                maxrev = int(self.revs[0])
            except ValueError:
                raise error.Abort(_('revision %s is not a patchset number')
                                  % self.revs[0])

        # cvsps works relative to the checkout; chdir there and back.
        d = pycompat.getcwd()
        try:
            os.chdir(self.path)
            id = None

            cache = 'update'
            if not self.ui.configbool('convert', 'cvsps.cache', True):
                cache = None
            db = cvsps.createlog(self.ui, cache=cache)
            db = cvsps.createchangeset(self.ui, db,
                fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
                mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
                mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))

            for cs in db:
                if maxrev and cs.id > maxrev:
                    break
                id = str(cs.id)
                cs.author = self.recode(cs.author)
                self.lastbranch[cs.branch] = id
                cs.comment = self.recode(cs.comment)
                if self.ui.configbool('convert', 'localtimezone'):
                    cs.date = makedatetimestamp(cs.date[0])
                date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2')
                self.tags.update(dict.fromkeys(cs.tags, id))

                files = {}
                for f in cs.entries:
                    # e.g. "1.2.3" or "1.2.3(DEAD)" for removed files
                    files[f.file] = "%s%s" % ('.'.join([str(x)
                                                        for x in f.revision]),
                                              ['', '(DEAD)'][f.dead])

                # add current commit to set
                c = commit(author=cs.author, date=date,
                           parents=[str(p.id) for p in cs.parents],
                           desc=cs.comment, branch=cs.branch or '')
                self.changeset[id] = c
                self.files[id] = files

            self.heads = self.lastbranch.values()
        finally:
            os.chdir(d)

    def _connect(self):
        """Open self.writep/self.readp to a CVS server.

        Supports :pserver: (direct socket with password lookup in
        ~/.cvspass), :local:, :ext:/rsh and plain local roots, then
        performs the initial Root/Valid-requests handshake.
        """
        root = self.cvsroot
        conntype = None
        user, host = None, None
        cmd = ['cvs', 'server']

        self.ui.status(_("connecting to %s\n") % root)

        if root.startswith(":pserver:"):
            root = root[9:]
            m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
                         root)
            if m:
                conntype = "pserver"
                user, passw, serv, port, root = m.groups()
                if not user:
                    user = "anonymous"
                if not port:
                    port = 2401
                else:
                    port = int(port)
                format0 = ":pserver:%s@%s:%s" % (user, serv, root)
                format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)

                if not passw:
                    passw = "A"
                    cvspass = os.path.expanduser("~/.cvspass")
                    try:
                        # "with" guarantees the file is closed even if
                        # reading raises (the original leaked it then).
                        with open(cvspass) as pf:
                            for line in pf.read().splitlines():
                                part1, part2 = line.split(' ', 1)
                                # /1 :pserver:user@example.com:2401/cvsroot/foo
                                # Ah<Z
                                if part1 == '/1':
                                    part1, part2 = part2.split(' ', 1)
                                    format = format1
                                # :pserver:user@example.com:/cvsroot/foo Ah<Z
                                else:
                                    format = format0
                                if part1 == format:
                                    passw = part2
                                    break
                    except IOError as inst:
                        if inst.errno != errno.ENOENT:
                            if not getattr(inst, 'filename', None):
                                inst.filename = cvspass
                            raise

                sck = socket.socket()
                sck.connect((serv, port))
                sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
                                    "END AUTH REQUEST", ""]))
                if sck.recv(128) != "I LOVE YOU\n":
                    raise error.Abort(_("CVS pserver authentication failed"))

                self.writep = self.readp = sck.makefile('r+')

        if not conntype and root.startswith(":local:"):
            conntype = "local"
            root = root[7:]

        if not conntype:
            # :ext:user@host/home/user/path/to/cvsroot
            if root.startswith(":ext:"):
                root = root[5:]
            m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
            # Do not take Windows path "c:\foo\bar" for a connection strings
            if os.path.isdir(root) or not m:
                conntype = "local"
            else:
                conntype = "rsh"
                user, host, root = m.group(1), m.group(2), m.group(3)

        if conntype != "pserver":
            if conntype == "rsh":
                rsh = os.environ.get("CVS_RSH") or "ssh"
                if user:
                    cmd = [rsh, '-l', user, host] + cmd
                else:
                    cmd = [rsh, host] + cmd

            # popen2 does not support argument lists under Windows
            cmd = [util.shellquote(arg) for arg in cmd]
            cmd = util.quotecommand(' '.join(cmd))
            self.writep, self.readp = util.popen2(cmd)

        self.realroot = root

        self.writep.write("Root %s\n" % root)
        self.writep.write("Valid-responses ok error Valid-requests Mode"
                          " M Mbinary E Checked-in Created Updated"
                          " Merged Removed\n")
        self.writep.write("valid-requests\n")
        self.writep.flush()
        r = self.readp.readline()
        if not r.startswith("Valid-requests"):
            raise error.Abort(_('unexpected response from CVS server '
                                '(expected "Valid-requests", but got %r)')
                              % r)
        if "UseUnchanged" in r:
            self.writep.write("UseUnchanged\n")
            self.writep.flush()
            r = self.readp.readline()

    def getheads(self):
        """Return the last changeset id seen on each branch."""
        self._parse()
        return self.heads

    def getfile(self, name, rev):
        """Check out file *name* at *rev* via the server connection.

        Returns (data, mode-flags) or (None, None) for a dead revision.
        """

        def chunkedread(fp, count):
            # file-objects returned by socket.makefile() do not handle
            # large read() requests very well.
            chunksize = 65536
            output = stringio()
            while count > 0:
                data = fp.read(min(count, chunksize))
                if not data:
                    raise error.Abort(_("%d bytes missing from remote file")
                                      % count)
                count -= len(data)
                output.write(data)
            return output.getvalue()

        self._parse()
        if rev.endswith("(DEAD)"):
            return None, None

        args = ("-N -P -kk -r %s --" % rev).split()
        args.append(self.cvsrepo + '/' + name)
        for x in args:
            self.writep.write("Argument %s\n" % x)
        self.writep.write("Directory .\n%s\nco\n" % self.realroot)
        self.writep.flush()

        data = ""
        mode = None
        while True:
            line = self.readp.readline()
            if line.startswith("Created ") or line.startswith("Updated "):
                self.readp.readline()  # path
                self.readp.readline()  # entries
                mode = self.readp.readline()[:-1]
                count = int(self.readp.readline()[:-1])
                data = chunkedread(self.readp, count)
            elif line.startswith(" "):
                data += line[1:]
            elif line.startswith("M "):
                pass
            elif line.startswith("Mbinary "):
                count = int(self.readp.readline()[:-1])
                data = chunkedread(self.readp, count)
            else:
                if line == "ok\n":
                    if mode is None:
                        raise error.Abort(_('malformed response from CVS'))
                    return (data, "x" in mode and "x" or "")
                elif line.startswith("E "):
                    self.ui.warn(_("cvs server: %s\n") % line[2:])
                elif line.startswith("Remove"):
                    self.readp.readline()
                else:
                    raise error.Abort(_("unknown CVS response: %s") % line)

    def getchanges(self, rev, full):
        """Return (sorted file list, copies, cleanfiles) for *rev*."""
        if full:
            raise error.Abort(_("convert from cvs does not support --full"))
        self._parse()
        return sorted(self.files[rev].iteritems()), {}, set()

    def getcommit(self, rev):
        """Return the commit object for changeset id *rev*."""
        self._parse()
        return self.changeset[rev]

    def gettags(self):
        """Return the tag-name -> changeset-id mapping."""
        self._parse()
        return self.tags

    def getchangedfiles(self, rev, i):
        """Return the sorted list of files touched by *rev*."""
        self._parse()
        return sorted(self.files[rev])
@@ -1,1351 +1,1353 @@
1 1 # Subversion 1.4/1.5 Python API backend
2 2 #
3 3 # Copyright(C) 2007 Daniel Holth et al
4 4 from __future__ import absolute_import
5 5
6 6 import os
7 7 import re
8 8 import tempfile
9 9 import xml.dom.minidom
10 10
11 11 from mercurial.i18n import _
12 12 from mercurial import (
13 13 encoding,
14 14 error,
15 pycompat,
15 16 scmutil,
16 17 strutil,
17 18 util,
18 19 )
19 20
20 21 from . import common
21 22
# Shorthands for the compatibility wrappers re-exported by util.
pickle = util.pickle
propertycache = util.propertycache
stringio = util.stringio
urlerr = util.urlerr
urlreq = util.urlreq

# Shorthands for the shared converter infrastructure in common.
commandline = common.commandline
commit = common.commit
converter_sink = common.converter_sink
converter_source = common.converter_source
decodeargs = common.decodeargs
encodeargs = common.encodeargs
makedatetimestamp = common.makedatetimestamp
mapfile = common.mapfile
MissingTool = common.MissingTool
NoRepo = common.NoRepo
38 39
39 40 # Subversion stuff. Works best with very recent Python SVN bindings
40 41 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
41 42 # these bindings.
42 43
try:
    import svn
    import svn.client
    import svn.core
    import svn.ra
    import svn.delta
    from . import transport
    import warnings
    warnings.filterwarnings('ignore',
                            module='svn.core',
                            category=DeprecationWarning)
    svn.core.SubversionException # trigger import to catch error

except ImportError:
    # Leave a None sentinel; callers test "svn is None" before using
    # the bindings (see debugsvnlog).
    svn = None
58 59
class SvnPathNotFound(Exception):
    """Internal signal: an svn path lookup came up empty."""
61 62
def revsplit(rev):
    """Parse a revision string and return (uuid, path, revnum).
    >>> revsplit('svn:a2147622-4a9f-4db4-a8d3-13562ff547b2'
    ...          '/proj%20B/mytrunk/mytrunk@1')
    ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1)
    >>> revsplit('svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1')
    ('', '', 1)
    >>> revsplit('@7')
    ('', '', 7)
    >>> revsplit('7')
    ('', '', 0)
    >>> revsplit('bad')
    ('', '', 0)
    """
    # Split off the trailing @revnum, if any.
    revnum = 0
    if '@' in rev:
        rev, num = rev.rsplit('@', 1)
        revnum = int(num)
    # The remainder is "svn:<uuid>/<module>" when well-formed.
    uuid = mod = ''
    pieces = rev.split('/', 1)
    if len(pieces) > 1 and pieces[0].startswith('svn:'):
        uuid = pieces[0][4:]
        mod = '/' + pieces[1]
    return uuid, mod, revnum
87 88
def quote(s):
    """Percent-encode *s* for use as a component of an svn URL."""
    # As of svn 1.7, many svn calls expect "canonical" paths. In
    # theory, we should call svn.core.*canonicalize() on all paths
    # before passing them to the API. Instead, we assume the base url
    # is canonical and copy the behaviour of svn URL encoding function
    # so we can extend it safely with new components. The "safe"
    # characters were taken from the "svn_uri__char_validity" table in
    # libsvn_subr/path.c.
    return urlreq.quote(s, "!$&'()*+,-./:=@_~")
97 98
def geturl(path):
    """Return the canonical Subversion URL for *path*.

    Tries the bindings' own translation first; when that fails (it does
    for local repositories) a file:// URL is assembled by hand.
    """
    try:
        canonical = svn.core.svn_path_canonicalize(path)
        return svn.client.url_from_path(canonical)
    except svn.core.SubversionException:
        # svn.client.url_from_path() fails with local repositories
        pass
    if os.path.isdir(path):
        abspath = os.path.normpath(os.path.abspath(path))
        if os.name == 'nt':
            abspath = '/' + util.normpath(abspath)
        # The module URL is later compared with the repository URL the
        # svn API returns, which is UTF-8.
        path = 'file://%s' % quote(encoding.tolocal(abspath))
    return svn.core.svn_path_canonicalize(path)
113 114
def optrev(number):
    """Wrap *number* in an svn_opt_revision_t of numeric kind."""
    rev = svn.core.svn_opt_revision_t()
    rev.kind = svn.core.svn_opt_revision_number
    rev.value.number = number
    return rev
119 120
class changedpath(object):
    """Plain copy of the changed-path fields of an svn log entry."""
    def __init__(self, p):
        for attr in ('copyfrom_path', 'copyfrom_rev', 'action'):
            setattr(self, attr, getattr(p, attr))
def get_log_child(fp, url, paths, start, end, limit=0,
                  discover_changed_paths=True, strict_node_history=False):
    """Run svn.ra.get_log() and pickle every entry to *fp*.

    Runs in a child process (see debugsvnlog); the parent reads the
    pickled entries from the other end of the pipe.  A trailing None
    (or an error string) marks the end of the stream.
    """
    # -1 selects the highest pickle protocol available.
    protocol = -1
    def receiver(orig_paths, revnum, author, date, message, pool):
        paths = {}
        if orig_paths is not None:
            # Copy the data out of the svn objects before pickling.
            for k, v in orig_paths.iteritems():
                paths[k] = changedpath(v)
        pickle.dump((paths, revnum, author, date, message),
                    fp, protocol)

    try:
        # Use an ra of our own so that our parent can consume
        # our results without confusing the server.
        t = transport.SvnRaTransport(url=url)
        svn.ra.get_log(t.ra, paths, start, end, limit,
                       discover_changed_paths,
                       strict_node_history,
                       receiver)
    except IOError:
        # Caller may interrupt the iteration
        pickle.dump(None, fp, protocol)
    except Exception as inst:
        pickle.dump(str(inst), fp, protocol)
    else:
        pickle.dump(None, fp, protocol)
    fp.close()
    # With large history, cleanup process goes crazy and suddenly
    # consumes *huge* amount of memory. The output file being closed,
    # there is no need for clean termination.
    os._exit(0)
157 158
def debugsvnlog(ui, **opts):
    """Fetch SVN log in a subprocess and channel them back to parent to
    avoid memory collection issues.
    """
    if svn is None:
        raise error.Abort(_('debugsvnlog could not load Subversion python '
                            'bindings'))

    # The parent serialized the get_log arguments on our stdin; stream
    # the resulting entries back as pickles on stdout.
    args = decodeargs(ui.fin.read())
    get_log_child(ui.fout, *args)
168 169
class logstream(object):
    """Interruptible revision log iterator."""
    def __init__(self, stdout):
        # Pipe carrying the pickled entries written by get_log_child.
        self._stdout = stdout

    def __iter__(self):
        while True:
            try:
                entry = pickle.load(self._stdout)
            except EOFError:
                raise error.Abort(_('Mercurial failed to run itself, check'
                                    ' hg executable is in PATH'))
            try:
                # A regular entry unpacks as a 5-tuple; the terminating
                # None (or an error string) fails to unpack instead.
                orig_paths, revnum, author, date, message = entry
            except (TypeError, ValueError):
                if entry is None:
                    break
                raise error.Abort(_("log stream exception '%s'") % entry)
            yield entry

    def close(self):
        # Idempotent: safe to call after the stream is already closed.
        if self._stdout:
            self._stdout.close()
            self._stdout = None
193 194
class directlogstream(list):
    """Direct revision log iterator.
    This can be used for debugging and development but it will probably leak
    memory and is not suitable for real conversions."""
    def __init__(self, url, paths, start, end, limit=0,
                 discover_changed_paths=True, strict_node_history=False):

        def receiver(orig_paths, revnum, author, date, message, pool):
            # Same shape as get_log_child's receiver, but entries are
            # appended to this list instead of pickled to a pipe.
            paths = {}
            if orig_paths is not None:
                for k, v in orig_paths.iteritems():
                    paths[k] = changedpath(v)
            self.append((paths, revnum, author, date, message))

        # Use an ra of our own so that our parent can consume
        # our results without confusing the server.
        t = transport.SvnRaTransport(url=url)
        svn.ra.get_log(t.ra, paths, start, end, limit,
                       discover_changed_paths,
                       strict_node_history,
                       receiver)

    def close(self):
        # Nothing to release; the entries live in the list itself.
        pass
218 219
219 220 # Check to see if the given path is a local Subversion repo. Verify this by
220 221 # looking for several svn-specific files and directories in the given
221 222 # directory.
222 223 def filecheck(ui, path, proto):
223 224 for x in ('locks', 'hooks', 'format', 'db'):
224 225 if not os.path.exists(os.path.join(path, x)):
225 226 return False
226 227 return True
227 228
228 229 # Check to see if a given path is the root of an svn repo over http. We verify
229 230 # this by requesting a version-controlled URL we know can't exist and looking
230 231 # for the svn-specific "not found" XML.
231 232 def httpcheck(ui, path, proto):
232 233 try:
233 234 opener = urlreq.buildopener()
234 235 rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
235 236 data = rsp.read()
236 237 except urlerr.httperror as inst:
237 238 if inst.code != 404:
238 239 # Except for 404 we cannot know for sure this is not an svn repo
239 240 ui.warn(_('svn: cannot probe remote repository, assume it could '
240 241 'be a subversion repository. Use --source-type if you '
241 242 'know better.\n'))
242 243 return True
243 244 data = inst.fp.read()
244 245 except Exception:
245 246 # Could be urlerr.urlerror if the URL is invalid or anything else.
246 247 return False
247 248 return '<m:human-readable errcode="160013">' in data
248 249
# Map URL scheme to the probe used to detect an svn repository there.
protomap = {'http': httpcheck,
            'https': httpcheck,
            'file': filecheck,
            }
def issvnurl(ui, url):
    """Return True if *url* (or one of its parent paths) is an svn repo."""
    try:
        proto, path = url.split('://', 1)
        if proto == 'file':
            if (os.name == 'nt' and path[:1] == '/' and path[1:2].isalpha()
                and path[2:6].lower() == '%3a/'):
                # Undo the /C%3a/ encoding of a Windows drive letter.
                path = path[:2] + ':/' + path[6:]
            path = urlreq.url2pathname(path)
    except ValueError:
        # No scheme: treat as a plain local path.
        proto, path = 'file', os.path.abspath(url)
    if proto == 'file':
        path = util.pconvert(path)
    probe = protomap.get(proto, lambda *args: False)
    # Walk up the path until a repository root (or nothing) is found.
    while '/' in path:
        if probe(ui, path, proto):
            return True
        path = path.rsplit('/', 1)[0]
    return False
272 273
273 274 # SVN conversion code stolen from bzr-svn and tailor
274 275 #
275 276 # Subversion looks like a versioned filesystem, branches structures
276 277 # are defined by conventions and not enforced by the tool. First,
277 278 # we define the potential branches (modules) as "trunk" and "branches"
278 279 # children directories. Revisions are then identified by their
279 280 # module and revision number (and a repository identifier).
280 281 #
281 282 # The revision graph is really a tree (or a forest). By default, a
282 283 # revision parent is the previous revision in the same module. If the
283 284 # module directory is copied/moved from another module then the
284 285 # revision is the module root and its parent the source revision in
285 286 # the parent module. A revision has at most one parent.
286 287 #
287 288 class svn_source(converter_source):
288 289 def __init__(self, ui, url, revs=None):
289 290 super(svn_source, self).__init__(ui, url, revs=revs)
290 291
291 292 if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
292 293 (os.path.exists(url) and
293 294 os.path.exists(os.path.join(url, '.svn'))) or
294 295 issvnurl(ui, url)):
295 296 raise NoRepo(_("%s does not look like a Subversion repository")
296 297 % url)
297 298 if svn is None:
298 299 raise MissingTool(_('could not load Subversion python bindings'))
299 300
300 301 try:
301 302 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
302 303 if version < (1, 4):
303 304 raise MissingTool(_('Subversion python bindings %d.%d found, '
304 305 '1.4 or later required') % version)
305 306 except AttributeError:
306 307 raise MissingTool(_('Subversion python bindings are too old, 1.4 '
307 308 'or later required'))
308 309
309 310 self.lastrevs = {}
310 311
311 312 latest = None
312 313 try:
313 314 # Support file://path@rev syntax. Useful e.g. to convert
314 315 # deleted branches.
315 316 at = url.rfind('@')
316 317 if at >= 0:
317 318 latest = int(url[at + 1:])
318 319 url = url[:at]
319 320 except ValueError:
320 321 pass
321 322 self.url = geturl(url)
322 323 self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
323 324 try:
324 325 self.transport = transport.SvnRaTransport(url=self.url)
325 326 self.ra = self.transport.ra
326 327 self.ctx = self.transport.client
327 328 self.baseurl = svn.ra.get_repos_root(self.ra)
328 329 # Module is either empty or a repository path starting with
329 330 # a slash and not ending with a slash.
330 331 self.module = urlreq.unquote(self.url[len(self.baseurl):])
331 332 self.prevmodule = None
332 333 self.rootmodule = self.module
333 334 self.commits = {}
334 335 self.paths = {}
335 336 self.uuid = svn.ra.get_uuid(self.ra)
336 337 except svn.core.SubversionException:
337 338 ui.traceback()
338 339 svnversion = '%d.%d.%d' % (svn.core.SVN_VER_MAJOR,
339 340 svn.core.SVN_VER_MINOR,
340 341 svn.core.SVN_VER_MICRO)
341 342 raise NoRepo(_("%s does not look like a Subversion repository "
342 343 "to libsvn version %s")
343 344 % (self.url, svnversion))
344 345
345 346 if revs:
346 347 if len(revs) > 1:
347 348 raise error.Abort(_('subversion source does not support '
348 349 'specifying multiple revisions'))
349 350 try:
350 351 latest = int(revs[0])
351 352 except ValueError:
352 353 raise error.Abort(_('svn: revision %s is not an integer') %
353 354 revs[0])
354 355
355 356 self.trunkname = self.ui.config('convert', 'svn.trunk',
356 357 'trunk').strip('/')
357 358 self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
358 359 try:
359 360 self.startrev = int(self.startrev)
360 361 if self.startrev < 0:
361 362 self.startrev = 0
362 363 except ValueError:
363 364 raise error.Abort(_('svn: start revision %s is not an integer')
364 365 % self.startrev)
365 366
366 367 try:
367 368 self.head = self.latest(self.module, latest)
368 369 except SvnPathNotFound:
369 370 self.head = None
370 371 if not self.head:
371 372 raise error.Abort(_('no revision found in module %s')
372 373 % self.module)
373 374 self.last_changed = self.revnum(self.head)
374 375
375 376 self._changescache = (None, None)
376 377
377 378 if os.path.exists(os.path.join(url, '.svn/entries')):
378 379 self.wc = url
379 380 else:
380 381 self.wc = None
381 382 self.convertfp = None
382 383
383 384 def setrevmap(self, revmap):
384 385 lastrevs = {}
385 386 for revid in revmap.iterkeys():
386 387 uuid, module, revnum = revsplit(revid)
387 388 lastrevnum = lastrevs.setdefault(module, revnum)
388 389 if revnum > lastrevnum:
389 390 lastrevs[module] = revnum
390 391 self.lastrevs = lastrevs
391 392
392 393 def exists(self, path, optrev):
393 394 try:
394 395 svn.client.ls(self.url.rstrip('/') + '/' + quote(path),
395 396 optrev, False, self.ctx)
396 397 return True
397 398 except svn.core.SubversionException:
398 399 return False
399 400
400 401 def getheads(self):
401 402
402 403 def isdir(path, revnum):
403 404 kind = self._checkpath(path, revnum)
404 405 return kind == svn.core.svn_node_dir
405 406
406 407 def getcfgpath(name, rev):
407 408 cfgpath = self.ui.config('convert', 'svn.' + name)
408 409 if cfgpath is not None and cfgpath.strip() == '':
409 410 return None
410 411 path = (cfgpath or name).strip('/')
411 412 if not self.exists(path, rev):
412 413 if self.module.endswith(path) and name == 'trunk':
413 414 # we are converting from inside this directory
414 415 return None
415 416 if cfgpath:
416 417 raise error.Abort(_('expected %s to be at %r, but not found'
417 418 ) % (name, path))
418 419 return None
419 420 self.ui.note(_('found %s at %r\n') % (name, path))
420 421 return path
421 422
422 423 rev = optrev(self.last_changed)
423 424 oldmodule = ''
424 425 trunk = getcfgpath('trunk', rev)
425 426 self.tags = getcfgpath('tags', rev)
426 427 branches = getcfgpath('branches', rev)
427 428
428 429 # If the project has a trunk or branches, we will extract heads
429 430 # from them. We keep the project root otherwise.
430 431 if trunk:
431 432 oldmodule = self.module or ''
432 433 self.module += '/' + trunk
433 434 self.head = self.latest(self.module, self.last_changed)
434 435 if not self.head:
435 436 raise error.Abort(_('no revision found in module %s')
436 437 % self.module)
437 438
438 439 # First head in the list is the module's head
439 440 self.heads = [self.head]
440 441 if self.tags is not None:
441 442 self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))
442 443
443 444 # Check if branches bring a few more heads to the list
444 445 if branches:
445 446 rpath = self.url.strip('/')
446 447 branchnames = svn.client.ls(rpath + '/' + quote(branches),
447 448 rev, False, self.ctx)
448 449 for branch in sorted(branchnames):
449 450 module = '%s/%s/%s' % (oldmodule, branches, branch)
450 451 if not isdir(module, self.last_changed):
451 452 continue
452 453 brevid = self.latest(module, self.last_changed)
453 454 if not brevid:
454 455 self.ui.note(_('ignoring empty branch %s\n') % branch)
455 456 continue
456 457 self.ui.note(_('found branch %s at %d\n') %
457 458 (branch, self.revnum(brevid)))
458 459 self.heads.append(brevid)
459 460
460 461 if self.startrev and self.heads:
461 462 if len(self.heads) > 1:
462 463 raise error.Abort(_('svn: start revision is not supported '
463 464 'with more than one branch'))
464 465 revnum = self.revnum(self.heads[0])
465 466 if revnum < self.startrev:
466 467 raise error.Abort(
467 468 _('svn: no revision found after start revision %d')
468 469 % self.startrev)
469 470
470 471 return self.heads
471 472
472 473 def _getchanges(self, rev, full):
473 474 (paths, parents) = self.paths[rev]
474 475 copies = {}
475 476 if parents:
476 477 files, self.removed, copies = self.expandpaths(rev, paths, parents)
477 478 if full or not parents:
478 479 # Perform a full checkout on roots
479 480 uuid, module, revnum = revsplit(rev)
480 481 entries = svn.client.ls(self.baseurl + quote(module),
481 482 optrev(revnum), True, self.ctx)
482 483 files = [n for n, e in entries.iteritems()
483 484 if e.kind == svn.core.svn_node_file]
484 485 self.removed = set()
485 486
486 487 files.sort()
487 488 files = zip(files, [rev] * len(files))
488 489 return (files, copies)
489 490
490 491 def getchanges(self, rev, full):
491 492 # reuse cache from getchangedfiles
492 493 if self._changescache[0] == rev and not full:
493 494 (files, copies) = self._changescache[1]
494 495 else:
495 496 (files, copies) = self._getchanges(rev, full)
496 497 # caller caches the result, so free it here to release memory
497 498 del self.paths[rev]
498 499 return (files, copies, set())
499 500
500 501 def getchangedfiles(self, rev, i):
501 502 # called from filemap - cache computed values for reuse in getchanges
502 503 (files, copies) = self._getchanges(rev, False)
503 504 self._changescache = (rev, (files, copies))
504 505 return [f[0] for f in files]
505 506
506 507 def getcommit(self, rev):
507 508 if rev not in self.commits:
508 509 uuid, module, revnum = revsplit(rev)
509 510 self.module = module
510 511 self.reparent(module)
511 512 # We assume that:
512 513 # - requests for revisions after "stop" come from the
513 514 # revision graph backward traversal. Cache all of them
514 515 # down to stop, they will be used eventually.
515 516 # - requests for revisions before "stop" come to get
516 517 # isolated branches parents. Just fetch what is needed.
517 518 stop = self.lastrevs.get(module, 0)
518 519 if revnum < stop:
519 520 stop = revnum + 1
520 521 self._fetch_revisions(revnum, stop)
521 522 if rev not in self.commits:
522 523 raise error.Abort(_('svn: revision %s not found') % revnum)
523 524 revcommit = self.commits[rev]
524 525 # caller caches the result, so free it here to release memory
525 526 del self.commits[rev]
526 527 return revcommit
527 528
528 529 def checkrevformat(self, revstr, mapname='splicemap'):
529 530 """ fails if revision format does not match the correct format"""
530 531 if not re.match(r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
531 532 r'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
532 533 r'{12,12}(.*)\@[0-9]+$',revstr):
533 534 raise error.Abort(_('%s entry %s is not a valid revision'
534 535 ' identifier') % (mapname, revstr))
535 536
536 537 def numcommits(self):
537 538 return int(self.head.rsplit('@', 1)[1]) - self.startrev
538 539
539 540 def gettags(self):
540 541 tags = {}
541 542 if self.tags is None:
542 543 return tags
543 544
544 545 # svn tags are just a convention, project branches left in a
545 546 # 'tags' directory. There is no other relationship than
546 547 # ancestry, which is expensive to discover and makes them hard
547 548 # to update incrementally. Worse, past revisions may be
548 549 # referenced by tags far away in the future, requiring a deep
549 550 # history traversal on every calculation. Current code
550 551 # performs a single backward traversal, tracking moves within
551 552 # the tags directory (tag renaming) and recording a new tag
552 553 # everytime a project is copied from outside the tags
553 554 # directory. It also lists deleted tags, this behaviour may
554 555 # change in the future.
555 556 pendings = []
556 557 tagspath = self.tags
557 558 start = svn.ra.get_latest_revnum(self.ra)
558 559 stream = self._getlog([self.tags], start, self.startrev)
559 560 try:
560 561 for entry in stream:
561 562 origpaths, revnum, author, date, message = entry
562 563 if not origpaths:
563 564 origpaths = []
564 565 copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
565 566 in origpaths.iteritems() if e.copyfrom_path]
566 567 # Apply moves/copies from more specific to general
567 568 copies.sort(reverse=True)
568 569
569 570 srctagspath = tagspath
570 571 if copies and copies[-1][2] == tagspath:
571 572 # Track tags directory moves
572 573 srctagspath = copies.pop()[0]
573 574
574 575 for source, sourcerev, dest in copies:
575 576 if not dest.startswith(tagspath + '/'):
576 577 continue
577 578 for tag in pendings:
578 579 if tag[0].startswith(dest):
579 580 tagpath = source + tag[0][len(dest):]
580 581 tag[:2] = [tagpath, sourcerev]
581 582 break
582 583 else:
583 584 pendings.append([source, sourcerev, dest])
584 585
585 586 # Filter out tags with children coming from different
586 587 # parts of the repository like:
587 588 # /tags/tag.1 (from /trunk:10)
588 589 # /tags/tag.1/foo (from /branches/foo:12)
589 590 # Here/tags/tag.1 discarded as well as its children.
590 591 # It happens with tools like cvs2svn. Such tags cannot
591 592 # be represented in mercurial.
592 593 addeds = dict((p, e.copyfrom_path) for p, e
593 594 in origpaths.iteritems()
594 595 if e.action == 'A' and e.copyfrom_path)
595 596 badroots = set()
596 597 for destroot in addeds:
597 598 for source, sourcerev, dest in pendings:
598 599 if (not dest.startswith(destroot + '/')
599 600 or source.startswith(addeds[destroot] + '/')):
600 601 continue
601 602 badroots.add(destroot)
602 603 break
603 604
604 605 for badroot in badroots:
605 606 pendings = [p for p in pendings if p[2] != badroot
606 607 and not p[2].startswith(badroot + '/')]
607 608
608 609 # Tell tag renamings from tag creations
609 610 renamings = []
610 611 for source, sourcerev, dest in pendings:
611 612 tagname = dest.split('/')[-1]
612 613 if source.startswith(srctagspath):
613 614 renamings.append([source, sourcerev, tagname])
614 615 continue
615 616 if tagname in tags:
616 617 # Keep the latest tag value
617 618 continue
618 619 # From revision may be fake, get one with changes
619 620 try:
620 621 tagid = self.latest(source, sourcerev)
621 622 if tagid and tagname not in tags:
622 623 tags[tagname] = tagid
623 624 except SvnPathNotFound:
624 625 # It happens when we are following directories
625 626 # we assumed were copied with their parents
626 627 # but were really created in the tag
627 628 # directory.
628 629 pass
629 630 pendings = renamings
630 631 tagspath = srctagspath
631 632 finally:
632 633 stream.close()
633 634 return tags
634 635
635 636 def converted(self, rev, destrev):
636 637 if not self.wc:
637 638 return
638 639 if self.convertfp is None:
639 640 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
640 641 'a')
641 642 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
642 643 self.convertfp.flush()
643 644
644 645 def revid(self, revnum, module=None):
645 646 return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
646 647
647 648 def revnum(self, rev):
648 649 return int(rev.split('@')[-1])
649 650
650 651 def latest(self, path, stop=None):
651 652 """Find the latest revid affecting path, up to stop revision
652 653 number. If stop is None, default to repository latest
653 654 revision. It may return a revision in a different module,
654 655 since a branch may be moved without a change being
655 656 reported. Return None if computed module does not belong to
656 657 rootmodule subtree.
657 658 """
658 659 def findchanges(path, start, stop=None):
659 660 stream = self._getlog([path], start, stop or 1)
660 661 try:
661 662 for entry in stream:
662 663 paths, revnum, author, date, message = entry
663 664 if stop is None and paths:
664 665 # We do not know the latest changed revision,
665 666 # keep the first one with changed paths.
666 667 break
667 668 if revnum <= stop:
668 669 break
669 670
670 671 for p in paths:
671 672 if (not path.startswith(p) or
672 673 not paths[p].copyfrom_path):
673 674 continue
674 675 newpath = paths[p].copyfrom_path + path[len(p):]
675 676 self.ui.debug("branch renamed from %s to %s at %d\n" %
676 677 (path, newpath, revnum))
677 678 path = newpath
678 679 break
679 680 if not paths:
680 681 revnum = None
681 682 return revnum, path
682 683 finally:
683 684 stream.close()
684 685
685 686 if not path.startswith(self.rootmodule):
686 687 # Requests on foreign branches may be forbidden at server level
687 688 self.ui.debug('ignoring foreign branch %r\n' % path)
688 689 return None
689 690
690 691 if stop is None:
691 692 stop = svn.ra.get_latest_revnum(self.ra)
692 693 try:
693 694 prevmodule = self.reparent('')
694 695 dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
695 696 self.reparent(prevmodule)
696 697 except svn.core.SubversionException:
697 698 dirent = None
698 699 if not dirent:
699 700 raise SvnPathNotFound(_('%s not found up to revision %d')
700 701 % (path, stop))
701 702
702 703 # stat() gives us the previous revision on this line of
703 704 # development, but it might be in *another module*. Fetch the
704 705 # log and detect renames down to the latest revision.
705 706 revnum, realpath = findchanges(path, stop, dirent.created_rev)
706 707 if revnum is None:
707 708 # Tools like svnsync can create empty revision, when
708 709 # synchronizing only a subtree for instance. These empty
709 710 # revisions created_rev still have their original values
710 711 # despite all changes having disappeared and can be
711 712 # returned by ra.stat(), at least when stating the root
712 713 # module. In that case, do not trust created_rev and scan
713 714 # the whole history.
714 715 revnum, realpath = findchanges(path, stop)
715 716 if revnum is None:
716 717 self.ui.debug('ignoring empty branch %r\n' % realpath)
717 718 return None
718 719
719 720 if not realpath.startswith(self.rootmodule):
720 721 self.ui.debug('ignoring foreign branch %r\n' % realpath)
721 722 return None
722 723 return self.revid(revnum, realpath)
723 724
724 725 def reparent(self, module):
725 726 """Reparent the svn transport and return the previous parent."""
726 727 if self.prevmodule == module:
727 728 return module
728 729 svnurl = self.baseurl + quote(module)
729 730 prevmodule = self.prevmodule
730 731 if prevmodule is None:
731 732 prevmodule = ''
732 733 self.ui.debug("reparent to %s\n" % svnurl)
733 734 svn.ra.reparent(self.ra, svnurl)
734 735 self.prevmodule = module
735 736 return prevmodule
736 737
737 738 def expandpaths(self, rev, paths, parents):
738 739 changed, removed = set(), set()
739 740 copies = {}
740 741
741 742 new_module, revnum = revsplit(rev)[1:]
742 743 if new_module != self.module:
743 744 self.module = new_module
744 745 self.reparent(self.module)
745 746
746 747 for i, (path, ent) in enumerate(paths):
747 748 self.ui.progress(_('scanning paths'), i, item=path,
748 749 total=len(paths), unit=_('paths'))
749 750 entrypath = self.getrelpath(path)
750 751
751 752 kind = self._checkpath(entrypath, revnum)
752 753 if kind == svn.core.svn_node_file:
753 754 changed.add(self.recode(entrypath))
754 755 if not ent.copyfrom_path or not parents:
755 756 continue
756 757 # Copy sources not in parent revisions cannot be
757 758 # represented, ignore their origin for now
758 759 pmodule, prevnum = revsplit(parents[0])[1:]
759 760 if ent.copyfrom_rev < prevnum:
760 761 continue
761 762 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
762 763 if not copyfrom_path:
763 764 continue
764 765 self.ui.debug("copied to %s from %s@%s\n" %
765 766 (entrypath, copyfrom_path, ent.copyfrom_rev))
766 767 copies[self.recode(entrypath)] = self.recode(copyfrom_path)
767 768 elif kind == 0: # gone, but had better be a deleted *file*
768 769 self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
769 770 pmodule, prevnum = revsplit(parents[0])[1:]
770 771 parentpath = pmodule + "/" + entrypath
771 772 fromkind = self._checkpath(entrypath, prevnum, pmodule)
772 773
773 774 if fromkind == svn.core.svn_node_file:
774 775 removed.add(self.recode(entrypath))
775 776 elif fromkind == svn.core.svn_node_dir:
776 777 oroot = parentpath.strip('/')
777 778 nroot = path.strip('/')
778 779 children = self._iterfiles(oroot, prevnum)
779 780 for childpath in children:
780 781 childpath = childpath.replace(oroot, nroot)
781 782 childpath = self.getrelpath("/" + childpath, pmodule)
782 783 if childpath:
783 784 removed.add(self.recode(childpath))
784 785 else:
785 786 self.ui.debug('unknown path in revision %d: %s\n' % \
786 787 (revnum, path))
787 788 elif kind == svn.core.svn_node_dir:
788 789 if ent.action == 'M':
789 790 # If the directory just had a prop change,
790 791 # then we shouldn't need to look for its children.
791 792 continue
792 793 if ent.action == 'R' and parents:
793 794 # If a directory is replacing a file, mark the previous
794 795 # file as deleted
795 796 pmodule, prevnum = revsplit(parents[0])[1:]
796 797 pkind = self._checkpath(entrypath, prevnum, pmodule)
797 798 if pkind == svn.core.svn_node_file:
798 799 removed.add(self.recode(entrypath))
799 800 elif pkind == svn.core.svn_node_dir:
800 801 # We do not know what files were kept or removed,
801 802 # mark them all as changed.
802 803 for childpath in self._iterfiles(pmodule, prevnum):
803 804 childpath = self.getrelpath("/" + childpath)
804 805 if childpath:
805 806 changed.add(self.recode(childpath))
806 807
807 808 for childpath in self._iterfiles(path, revnum):
808 809 childpath = self.getrelpath("/" + childpath)
809 810 if childpath:
810 811 changed.add(self.recode(childpath))
811 812
812 813 # Handle directory copies
813 814 if not ent.copyfrom_path or not parents:
814 815 continue
815 816 # Copy sources not in parent revisions cannot be
816 817 # represented, ignore their origin for now
817 818 pmodule, prevnum = revsplit(parents[0])[1:]
818 819 if ent.copyfrom_rev < prevnum:
819 820 continue
820 821 copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
821 822 if not copyfrompath:
822 823 continue
823 824 self.ui.debug("mark %s came from %s:%d\n"
824 825 % (path, copyfrompath, ent.copyfrom_rev))
825 826 children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
826 827 for childpath in children:
827 828 childpath = self.getrelpath("/" + childpath, pmodule)
828 829 if not childpath:
829 830 continue
830 831 copytopath = path + childpath[len(copyfrompath):]
831 832 copytopath = self.getrelpath(copytopath)
832 833 copies[self.recode(copytopath)] = self.recode(childpath)
833 834
834 835 self.ui.progress(_('scanning paths'), None)
835 836 changed.update(removed)
836 837 return (list(changed), removed, copies)
837 838
838 839 def _fetch_revisions(self, from_revnum, to_revnum):
839 840 if from_revnum < to_revnum:
840 841 from_revnum, to_revnum = to_revnum, from_revnum
841 842
842 843 self.child_cset = None
843 844
844 845 def parselogentry(orig_paths, revnum, author, date, message):
845 846 """Return the parsed commit object or None, and True if
846 847 the revision is a branch root.
847 848 """
848 849 self.ui.debug("parsing revision %d (%d changes)\n" %
849 850 (revnum, len(orig_paths)))
850 851
851 852 branched = False
852 853 rev = self.revid(revnum)
853 854 # branch log might return entries for a parent we already have
854 855
855 856 if rev in self.commits or revnum < to_revnum:
856 857 return None, branched
857 858
858 859 parents = []
859 860 # check whether this revision is the start of a branch or part
860 861 # of a branch renaming
861 862 orig_paths = sorted(orig_paths.iteritems())
862 863 root_paths = [(p, e) for p, e in orig_paths
863 864 if self.module.startswith(p)]
864 865 if root_paths:
865 866 path, ent = root_paths[-1]
866 867 if ent.copyfrom_path:
867 868 branched = True
868 869 newpath = ent.copyfrom_path + self.module[len(path):]
869 870 # ent.copyfrom_rev may not be the actual last revision
870 871 previd = self.latest(newpath, ent.copyfrom_rev)
871 872 if previd is not None:
872 873 prevmodule, prevnum = revsplit(previd)[1:]
873 874 if prevnum >= self.startrev:
874 875 parents = [previd]
875 876 self.ui.note(
876 877 _('found parent of branch %s at %d: %s\n') %
877 878 (self.module, prevnum, prevmodule))
878 879 else:
879 880 self.ui.debug("no copyfrom path, don't know what to do.\n")
880 881
881 882 paths = []
882 883 # filter out unrelated paths
883 884 for path, ent in orig_paths:
884 885 if self.getrelpath(path) is None:
885 886 continue
886 887 paths.append((path, ent))
887 888
888 889 # Example SVN datetime. Includes microseconds.
889 890 # ISO-8601 conformant
890 891 # '2007-01-04T17:35:00.902377Z'
891 892 date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
892 893 if self.ui.configbool('convert', 'localtimezone'):
893 894 date = makedatetimestamp(date[0])
894 895
895 896 if message:
896 897 log = self.recode(message)
897 898 else:
898 899 log = ''
899 900
900 901 if author:
901 902 author = self.recode(author)
902 903 else:
903 904 author = ''
904 905
905 906 try:
906 907 branch = self.module.split("/")[-1]
907 908 if branch == self.trunkname:
908 909 branch = None
909 910 except IndexError:
910 911 branch = None
911 912
912 913 cset = commit(author=author,
913 914 date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
914 915 desc=log,
915 916 parents=parents,
916 917 branch=branch,
917 918 rev=rev)
918 919
919 920 self.commits[rev] = cset
920 921 # The parents list is *shared* among self.paths and the
921 922 # commit object. Both will be updated below.
922 923 self.paths[rev] = (paths, cset.parents)
923 924 if self.child_cset and not self.child_cset.parents:
924 925 self.child_cset.parents[:] = [rev]
925 926 self.child_cset = cset
926 927 return cset, branched
927 928
928 929 self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
929 930 (self.module, from_revnum, to_revnum))
930 931
931 932 try:
932 933 firstcset = None
933 934 lastonbranch = False
934 935 stream = self._getlog([self.module], from_revnum, to_revnum)
935 936 try:
936 937 for entry in stream:
937 938 paths, revnum, author, date, message = entry
938 939 if revnum < self.startrev:
939 940 lastonbranch = True
940 941 break
941 942 if not paths:
942 943 self.ui.debug('revision %d has no entries\n' % revnum)
943 944 # If we ever leave the loop on an empty
944 945 # revision, do not try to get a parent branch
945 946 lastonbranch = lastonbranch or revnum == 0
946 947 continue
947 948 cset, lastonbranch = parselogentry(paths, revnum, author,
948 949 date, message)
949 950 if cset:
950 951 firstcset = cset
951 952 if lastonbranch:
952 953 break
953 954 finally:
954 955 stream.close()
955 956
956 957 if not lastonbranch and firstcset and not firstcset.parents:
957 958 # The first revision of the sequence (the last fetched one)
958 959 # has invalid parents if not a branch root. Find the parent
959 960 # revision now, if any.
960 961 try:
961 962 firstrevnum = self.revnum(firstcset.rev)
962 963 if firstrevnum > 1:
963 964 latest = self.latest(self.module, firstrevnum - 1)
964 965 if latest:
965 966 firstcset.parents.append(latest)
966 967 except SvnPathNotFound:
967 968 pass
968 969 except svn.core.SubversionException as xxx_todo_changeme:
969 970 (inst, num) = xxx_todo_changeme.args
970 971 if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
971 972 raise error.Abort(_('svn: branch has no revision %s')
972 973 % to_revnum)
973 974 raise
974 975
975 976 def getfile(self, file, rev):
976 977 # TODO: ra.get_file transmits the whole file instead of diffs.
977 978 if file in self.removed:
978 979 return None, None
979 980 mode = ''
980 981 try:
981 982 new_module, revnum = revsplit(rev)[1:]
982 983 if self.module != new_module:
983 984 self.module = new_module
984 985 self.reparent(self.module)
985 986 io = stringio()
986 987 info = svn.ra.get_file(self.ra, file, revnum, io)
987 988 data = io.getvalue()
988 989 # ra.get_file() seems to keep a reference on the input buffer
989 990 # preventing collection. Release it explicitly.
990 991 io.close()
991 992 if isinstance(info, list):
992 993 info = info[-1]
993 994 mode = ("svn:executable" in info) and 'x' or ''
994 995 mode = ("svn:special" in info) and 'l' or mode
995 996 except svn.core.SubversionException as e:
996 997 notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
997 998 svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
998 999 if e.apr_err in notfound: # File not found
999 1000 return None, None
1000 1001 raise
1001 1002 if mode == 'l':
1002 1003 link_prefix = "link "
1003 1004 if data.startswith(link_prefix):
1004 1005 data = data[len(link_prefix):]
1005 1006 return data, mode
1006 1007
1007 1008 def _iterfiles(self, path, revnum):
1008 1009 """Enumerate all files in path at revnum, recursively."""
1009 1010 path = path.strip('/')
1010 1011 pool = svn.core.Pool()
1011 1012 rpath = '/'.join([self.baseurl, quote(path)]).strip('/')
1012 1013 entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
1013 1014 if path:
1014 1015 path += '/'
1015 1016 return ((path + p) for p, e in entries.iteritems()
1016 1017 if e.kind == svn.core.svn_node_file)
1017 1018
1018 1019 def getrelpath(self, path, module=None):
1019 1020 if module is None:
1020 1021 module = self.module
1021 1022 # Given the repository url of this wc, say
1022 1023 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
1023 1024 # extract the "entry" portion (a relative path) from what
1024 1025 # svn log --xml says, i.e.
1025 1026 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
1026 1027 # that is to say "tests/PloneTestCase.py"
1027 1028 if path.startswith(module):
1028 1029 relative = path.rstrip('/')[len(module):]
1029 1030 if relative.startswith('/'):
1030 1031 return relative[1:]
1031 1032 elif relative == '':
1032 1033 return relative
1033 1034
1034 1035 # The path is outside our tracked tree...
1035 1036 self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
1036 1037 return None
1037 1038
1038 1039 def _checkpath(self, path, revnum, module=None):
1039 1040 if module is not None:
1040 1041 prevmodule = self.reparent('')
1041 1042 path = module + '/' + path
1042 1043 try:
1043 1044 # ra.check_path does not like leading slashes very much, it leads
1044 1045 # to PROPFIND subversion errors
1045 1046 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
1046 1047 finally:
1047 1048 if module is not None:
1048 1049 self.reparent(prevmodule)
1049 1050
1050 1051 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
1051 1052 strict_node_history=False):
1052 1053 # Normalize path names, svn >= 1.5 only wants paths relative to
1053 1054 # supplied URL
1054 1055 relpaths = []
1055 1056 for p in paths:
1056 1057 if not p.startswith('/'):
1057 1058 p = self.module + '/' + p
1058 1059 relpaths.append(p.strip('/'))
1059 1060 args = [self.baseurl, relpaths, start, end, limit,
1060 1061 discover_changed_paths, strict_node_history]
1061 1062 # developer config: convert.svn.debugsvnlog
1062 1063 if not self.ui.configbool('convert', 'svn.debugsvnlog', True):
1063 1064 return directlogstream(*args)
1064 1065 arg = encodeargs(args)
1065 1066 hgexe = util.hgexecutable()
1066 1067 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
1067 1068 stdin, stdout = util.popen2(util.quotecommand(cmd))
1068 1069 stdin.write(arg)
1069 1070 try:
1070 1071 stdin.close()
1071 1072 except IOError:
1072 1073 raise error.Abort(_('Mercurial failed to run itself, check'
1073 1074 ' hg executable is in PATH'))
1074 1075 return logstream(stdout)
1075 1076
1076 1077 pre_revprop_change = '''#!/bin/sh
1077 1078
1078 1079 REPOS="$1"
1079 1080 REV="$2"
1080 1081 USER="$3"
1081 1082 PROPNAME="$4"
1082 1083 ACTION="$5"
1083 1084
1084 1085 if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
1085 1086 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
1086 1087 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
1087 1088
1088 1089 echo "Changing prohibited revision property" >&2
1089 1090 exit 1
1090 1091 '''
1091 1092
1092 1093 class svn_sink(converter_sink, commandline):
1093 1094 commit_re = re.compile(r'Committed revision (\d+).', re.M)
1094 1095 uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M)
1095 1096
1096 1097 def prerun(self):
1097 1098 if self.wc:
1098 1099 os.chdir(self.wc)
1099 1100
1100 1101 def postrun(self):
1101 1102 if self.wc:
1102 1103 os.chdir(self.cwd)
1103 1104
1104 1105 def join(self, name):
1105 1106 return os.path.join(self.wc, '.svn', name)
1106 1107
1107 1108 def revmapfile(self):
1108 1109 return self.join('hg-shamap')
1109 1110
1110 1111 def authorfile(self):
1111 1112 return self.join('hg-authormap')
1112 1113
1113 1114 def __init__(self, ui, path):
1114 1115
1115 1116 converter_sink.__init__(self, ui, path)
1116 1117 commandline.__init__(self, ui, 'svn')
1117 1118 self.delete = []
1118 1119 self.setexec = []
1119 1120 self.delexec = []
1120 1121 self.copies = []
1121 1122 self.wc = None
1122 self.cwd = os.getcwd()
1123 self.cwd = pycompat.getcwd()
1123 1124
1124 1125 created = False
1125 1126 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
1126 1127 self.wc = os.path.realpath(path)
1127 1128 self.run0('update')
1128 1129 else:
1129 1130 if not re.search(r'^(file|http|https|svn|svn\+ssh)\://', path):
1130 1131 path = os.path.realpath(path)
1131 1132 if os.path.isdir(os.path.dirname(path)):
1132 1133 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
1133 1134 ui.status(_('initializing svn repository %r\n') %
1134 1135 os.path.basename(path))
1135 1136 commandline(ui, 'svnadmin').run0('create', path)
1136 1137 created = path
1137 1138 path = util.normpath(path)
1138 1139 if not path.startswith('/'):
1139 1140 path = '/' + path
1140 1141 path = 'file://' + path
1141 1142
1142 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
1143 wcpath = os.path.join(pycompat.getcwd(), os.path.basename(path) +
1144 '-wc')
1143 1145 ui.status(_('initializing svn working copy %r\n')
1144 1146 % os.path.basename(wcpath))
1145 1147 self.run0('checkout', path, wcpath)
1146 1148
1147 1149 self.wc = wcpath
1148 1150 self.opener = scmutil.opener(self.wc)
1149 1151 self.wopener = scmutil.opener(self.wc)
1150 1152 self.childmap = mapfile(ui, self.join('hg-childmap'))
1151 1153 if util.checkexec(self.wc):
1152 1154 self.is_exec = util.isexec
1153 1155 else:
1154 1156 self.is_exec = None
1155 1157
1156 1158 if created:
1157 1159 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1158 1160 fp = open(hook, 'w')
1159 1161 fp.write(pre_revprop_change)
1160 1162 fp.close()
1161 1163 util.setflags(hook, False, True)
1162 1164
1163 1165 output = self.run0('info')
1164 1166 self.uuid = self.uuid_re.search(output).group(1).strip()
1165 1167
1166 1168 def wjoin(self, *names):
1167 1169 return os.path.join(self.wc, *names)
1168 1170
1169 1171 @propertycache
1170 1172 def manifest(self):
1171 1173 # As of svn 1.7, the "add" command fails when receiving
1172 1174 # already tracked entries, so we have to track and filter them
1173 1175 # ourselves.
1174 1176 m = set()
1175 1177 output = self.run0('ls', recursive=True, xml=True)
1176 1178 doc = xml.dom.minidom.parseString(output)
1177 1179 for e in doc.getElementsByTagName('entry'):
1178 1180 for n in e.childNodes:
1179 1181 if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
1180 1182 continue
1181 1183 name = ''.join(c.data for c in n.childNodes
1182 1184 if c.nodeType == c.TEXT_NODE)
1183 1185 # Entries are compared with names coming from
1184 1186 # mercurial, so bytes with undefined encoding. Our
1185 1187 # best bet is to assume they are in local
1186 1188 # encoding. They will be passed to command line calls
1187 1189 # later anyway, so they better be.
1188 1190 m.add(encoding.tolocal(name.encode('utf-8')))
1189 1191 break
1190 1192 return m
1191 1193
1192 1194 def putfile(self, filename, flags, data):
1193 1195 if 'l' in flags:
1194 1196 self.wopener.symlink(data, filename)
1195 1197 else:
1196 1198 try:
1197 1199 if os.path.islink(self.wjoin(filename)):
1198 1200 os.unlink(filename)
1199 1201 except OSError:
1200 1202 pass
1201 1203 self.wopener.write(filename, data)
1202 1204
1203 1205 if self.is_exec:
1204 1206 if self.is_exec(self.wjoin(filename)):
1205 1207 if 'x' not in flags:
1206 1208 self.delexec.append(filename)
1207 1209 else:
1208 1210 if 'x' in flags:
1209 1211 self.setexec.append(filename)
1210 1212 util.setflags(self.wjoin(filename), False, 'x' in flags)
1211 1213
1212 1214 def _copyfile(self, source, dest):
1213 1215 # SVN's copy command pukes if the destination file exists, but
1214 1216 # our copyfile method expects to record a copy that has
1215 1217 # already occurred. Cross the semantic gap.
1216 1218 wdest = self.wjoin(dest)
1217 1219 exists = os.path.lexists(wdest)
1218 1220 if exists:
1219 1221 fd, tempname = tempfile.mkstemp(
1220 1222 prefix='hg-copy-', dir=os.path.dirname(wdest))
1221 1223 os.close(fd)
1222 1224 os.unlink(tempname)
1223 1225 os.rename(wdest, tempname)
1224 1226 try:
1225 1227 self.run0('copy', source, dest)
1226 1228 finally:
1227 1229 self.manifest.add(dest)
1228 1230 if exists:
1229 1231 try:
1230 1232 os.unlink(wdest)
1231 1233 except OSError:
1232 1234 pass
1233 1235 os.rename(tempname, wdest)
1234 1236
1235 1237 def dirs_of(self, files):
1236 1238 dirs = set()
1237 1239 for f in files:
1238 1240 if os.path.isdir(self.wjoin(f)):
1239 1241 dirs.add(f)
1240 1242 for i in strutil.rfindall(f, '/'):
1241 1243 dirs.add(f[:i])
1242 1244 return dirs
1243 1245
1244 1246 def add_dirs(self, files):
1245 1247 add_dirs = [d for d in sorted(self.dirs_of(files))
1246 1248 if d not in self.manifest]
1247 1249 if add_dirs:
1248 1250 self.manifest.update(add_dirs)
1249 1251 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1250 1252 return add_dirs
1251 1253
1252 1254 def add_files(self, files):
1253 1255 files = [f for f in files if f not in self.manifest]
1254 1256 if files:
1255 1257 self.manifest.update(files)
1256 1258 self.xargs(files, 'add', quiet=True)
1257 1259 return files
1258 1260
1259 1261 def addchild(self, parent, child):
1260 1262 self.childmap[parent] = child
1261 1263
1262 1264 def revid(self, rev):
1263 1265 return u"svn:%s@%s" % (self.uuid, rev)
1264 1266
1265 1267 def putcommit(self, files, copies, parents, commit, source, revmap, full,
1266 1268 cleanp2):
1267 1269 for parent in parents:
1268 1270 try:
1269 1271 return self.revid(self.childmap[parent])
1270 1272 except KeyError:
1271 1273 pass
1272 1274
1273 1275 # Apply changes to working copy
1274 1276 for f, v in files:
1275 1277 data, mode = source.getfile(f, v)
1276 1278 if data is None:
1277 1279 self.delete.append(f)
1278 1280 else:
1279 1281 self.putfile(f, mode, data)
1280 1282 if f in copies:
1281 1283 self.copies.append([copies[f], f])
1282 1284 if full:
1283 1285 self.delete.extend(sorted(self.manifest.difference(files)))
1284 1286 files = [f[0] for f in files]
1285 1287
1286 1288 entries = set(self.delete)
1287 1289 files = frozenset(files)
1288 1290 entries.update(self.add_dirs(files.difference(entries)))
1289 1291 if self.copies:
1290 1292 for s, d in self.copies:
1291 1293 self._copyfile(s, d)
1292 1294 self.copies = []
1293 1295 if self.delete:
1294 1296 self.xargs(self.delete, 'delete')
1295 1297 for f in self.delete:
1296 1298 self.manifest.remove(f)
1297 1299 self.delete = []
1298 1300 entries.update(self.add_files(files.difference(entries)))
1299 1301 if self.delexec:
1300 1302 self.xargs(self.delexec, 'propdel', 'svn:executable')
1301 1303 self.delexec = []
1302 1304 if self.setexec:
1303 1305 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1304 1306 self.setexec = []
1305 1307
1306 1308 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1307 1309 fp = os.fdopen(fd, 'w')
1308 1310 fp.write(commit.desc)
1309 1311 fp.close()
1310 1312 try:
1311 1313 output = self.run0('commit',
1312 1314 username=util.shortuser(commit.author),
1313 1315 file=messagefile,
1314 1316 encoding='utf-8')
1315 1317 try:
1316 1318 rev = self.commit_re.search(output).group(1)
1317 1319 except AttributeError:
1318 1320 if parents and not files:
1319 1321 return parents[0]
1320 1322 self.ui.warn(_('unexpected svn output:\n'))
1321 1323 self.ui.warn(output)
1322 1324 raise error.Abort(_('unable to cope with svn output'))
1323 1325 if commit.rev:
1324 1326 self.run('propset', 'hg:convert-rev', commit.rev,
1325 1327 revprop=True, revision=rev)
1326 1328 if commit.branch and commit.branch != 'default':
1327 1329 self.run('propset', 'hg:convert-branch', commit.branch,
1328 1330 revprop=True, revision=rev)
1329 1331 for parent in parents:
1330 1332 self.addchild(parent, rev)
1331 1333 return self.revid(rev)
1332 1334 finally:
1333 1335 os.unlink(messagefile)
1334 1336
1335 1337 def puttags(self, tags):
1336 1338 self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
1337 1339 return None, None
1338 1340
1339 1341 def hascommitfrommap(self, rev):
1340 1342 # We trust that revisions referenced in a map still is present
1341 1343 # TODO: implement something better if necessary and feasible
1342 1344 return True
1343 1345
1344 1346 def hascommitforsplicemap(self, rev):
1345 1347 # This is not correct as one can convert to an existing subversion
1346 1348 # repository and childmap would not list all revisions. Too bad.
1347 1349 if rev in self.childmap:
1348 1350 return True
1349 1351 raise error.Abort(_('splice map revision %s not found in subversion '
1350 1352 'child map (revision lookups are not implemented)')
1351 1353 % rev)
@@ -1,3608 +1,3609
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help command` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 files creations or deletions. This behavior can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 make them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60
61 61 This extension used to provide a strip command. This command now lives
62 62 in the strip extension.
63 63 '''
64 64
65 65 from __future__ import absolute_import
66 66
67 67 import errno
68 68 import os
69 69 import re
70 70 import shutil
71 71 from mercurial.i18n import _
72 72 from mercurial.node import (
73 73 bin,
74 74 hex,
75 75 nullid,
76 76 nullrev,
77 77 short,
78 78 )
79 79 from mercurial import (
80 80 cmdutil,
81 81 commands,
82 82 dirstateguard,
83 83 error,
84 84 extensions,
85 85 hg,
86 86 localrepo,
87 87 lock as lockmod,
88 88 patch as patchmod,
89 89 phases,
90 pycompat,
90 91 registrar,
91 92 revset,
92 93 scmutil,
93 94 subrepo,
94 95 util,
95 96 )
96 97
# convenience alias for releasing several locks at once
release = lockmod.release
# option shared by several q* commands
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

# command registration table populated by the @command decorator below
cmdtable = {}
command = cmdutil.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
107 108
# force load strip extension formerly included in mq and import some utility
try:
    stripext = extensions.find('strip')
except KeyError:
    # note: load is lazy so we could avoid the try-except,
    # but I (marmoute) prefer this explicit code.
    class dummyui(object):
        # minimal ui stand-in: extensions.load only needs debug()
        def debug(self, msg):
            pass
    stripext = extensions.load(dummyui(), 'strip', '')

# re-export the strip helpers mq historically provided
strip = stripext.strip
checksubstate = stripext.checksubstate
checklocalchanges = stripext.checklocalchanges


# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
127 128
class statusentry(object):
    """One applied patch: the changeset node paired with the patch name."""

    def __init__(self, node, name):
        self.node = node
        self.name = name

    def __repr__(self):
        # same "hexnode:name" form used in the status file
        return '%s:%s' % (hex(self.node), self.name)
133 134
# The order of the headers in 'hg export' HG patches:
# (inserthgheader uses the list index as the canonical position)
HGHEADERS = [
#   '# HG changeset patch',
    '# User ',
    '# Date ',
    '# ',
    '# Branch ',
    '# Node ID ',
    '# Parent ', # can occur twice for merges - but that is not relevant for mq
    ]
# The order of headers in plain 'mail style' patches:
# (lower number = earlier in the file; used by insertplainheader)
PLAINHEADERS = {
    'from': 0,
    'date': 1,
    'subject': 2,
    }
150 151
def inserthgheader(lines, header, value):
    """Insert or replace an HG patch header line in lines, in place.

    lines must contain the '# HG changeset patch' marker; a ValueError is
    raised (by list.index) when it is missing.  An existing header of the
    same kind is overwritten; otherwise the new line is inserted at the
    position dictated by the HGHEADERS ordering.  Returns lines.

    >>> inserthgheader(['# HG changeset patch'], '# Date ', 'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader(['# HG changeset patch', '# User y'], '# Date ', 'z')
    ['# HG changeset patch', '# User y', '# Date z']
    >>> inserthgheader(['# HG changeset patch', '# Date y'], '# Date ', 'z')
    ['# HG changeset patch', '# Date z']
    """
    marker = lines.index('# HG changeset patch')
    wanted = HGHEADERS.index(header)
    insertpos = len(lines)
    for pos in range(marker + 1, len(lines)):
        candidate = lines[pos]
        if not candidate.startswith('# '):
            # end of the header block: insert no later than here
            insertpos = min(insertpos, pos)
            break
        for rank, known in enumerate(HGHEADERS):
            if candidate.startswith(known):
                if rank == wanted:
                    # same header kind already present: overwrite in place
                    lines[pos] = header + value
                    return lines
                if rank > wanted:
                    # first header that should come after ours
                    insertpos = min(insertpos, pos)
                break  # done with this line, examine the next one
    lines.insert(insertpos, header + value)
    return lines
190 191
def insertplainheader(lines, header, value):
    """Insert or replace a mail-style patch header in lines, in place.

    header must be a key of PLAINHEADERS ('From', 'Date', 'Subject',
    case-insensitive).  An existing header of the same kind is replaced;
    otherwise the line is inserted at the position given by the
    PLAINHEADERS priorities, with a blank separator line added before the
    body if necessary.  Returns lines.

    >>> insertplainheader([], 'Date', 'z')
    ['Date: z']
    >>> insertplainheader(['x'], 'Date', 'z')
    ['Date: z', '', 'x']
    >>> insertplainheader(['From: y', 'x'], 'Date', 'z')
    ['From: y', 'Date: z', '', 'x']
    """
    priority = PLAINHEADERS[header.lower()]
    insertat = len(lines)
    for idx, text in enumerate(lines):
        if ':' not in text:
            # header block ended; keep the body separated by a blank line
            if text:
                lines.insert(idx, '')
            insertat = min(insertat, idx)
            break
        name = text.split(':', 1)[0].strip().lower()
        # unknown header names sort after the one being inserted
        existing = PLAINHEADERS.get(name, priority + 1)
        if existing == priority:
            lines[idx] = '%s: %s' % (header, value)
            return lines
        if existing > priority:
            insertat = min(insertat, idx)
    lines.insert(insertat, '%s: %s' % (header, value))
    return lines
227 228
class patchheader(object):
    """Parsed header of a patch file: comments, message and metadata.

    Understands both 'hg export' style headers ('# User ', '# Date ', ...)
    and plain mail-style headers ('From:', 'Date:', 'Subject:').  The
    setuser/setdate/setparent/setmessage methods edit the comment block in
    place; str() renders it back for writing to the patch file.
    """
    def __init__(self, pf, plainmode=False):
        # pf: path of the patch file to parse
        # plainmode: force mail-style headers on later edits
        def eatdiff(lines):
            # strip trailing diff-start lines left over from parsing
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # strip trailing blank lines
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        # format state machine: None / "hgpatch" / "tag" / "tagdone"
        format = None
        subject = None
        branch = None
        nodeid = None
        # diffstart: 0 = not in a diff, 1 = saw '--- ', 2 = diff confirmed
        diffstart = 0

        # NOTE: file() is the Python 2 builtin open
        for line in file(pf):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                # the actual diff begins here; header parsing is done
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:].lstrip() # handle double trailing space
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    # first non-header line: start of the commit message
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            # comments collects every header/message line verbatim
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        # True only when a real diff body was found (diffstart reached 2)
        self.haspatch = diffstart > 1
        # plain mode when forced, or when the file used mail-style headers
        # without the '# HG changeset patch' marker
        self.plainmode = (plainmode or
                          '# HG changeset patch' not in self.comments and
                          any(c.startswith('Date: ') or
                              c.startswith('From: ')
                              for c in self.comments))

    def setuser(self, user):
        # prefer editing an existing HG header block; fall back to the
        # plain style or to creating a new HG header block
        try:
            inserthgheader(self.comments, '# User ', user)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, 'From', user)
            else:
                tmp = ['# HG changeset patch', '# User ' + user]
                self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        # same strategy as setuser, for the date header
        try:
            inserthgheader(self.comments, '# Date ', date)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, 'Date', date)
            else:
                tmp = ['# HG changeset patch', '# Date ' + date]
                self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        # plain patches have no parent header, so only HG style is written
        try:
            inserthgheader(self.comments, '# Parent ', parent)
        except ValueError:
            if not self.plainmode:
                tmp = ['# HG changeset patch', '# Parent ' + parent]
                self.comments = tmp + self.comments
        self.parent = parent

    def setmessage(self, message):
        # replace the stored commit message, keeping header comments
        if self.comments:
            self._delmsg()
        self.message = [message]
        if message:
            if self.plainmode and self.comments and self.comments[-1]:
                self.comments.append('')
            self.comments.append(message)

    def __str__(self):
        # render the comment block for writing back to the patch file
        s = '\n'.join(self.comments).rstrip()
        if not s:
            return ''
        return s + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    # skip the subject and the blank line that followed it
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
394 395
def newcommit(repo, phase, *args, **kwargs):
    """Commit on repo while honouring the mq.secret setting.

    mq code must call this instead of repo.commit() when creating new
    changesets: the requested phase (or phases.secret when mq.secret is
    set) is installed for the duration of the commit, and empty commits
    are temporarily allowed.
    """
    repo = repo.unfiltered()
    if phase is None and repo.ui.configbool('mq', 'secret', False):
        phase = phases.secret
    overridephase = phase is not None
    if overridephase:
        savedphase = repo.ui.backupconfig('phases', 'new-commit')
    savedallowempty = repo.ui.backupconfig('ui', 'allowemptycommit')
    try:
        if overridephase:
            repo.ui.setconfig('phases', 'new-commit', phase, 'mq')
        repo.ui.setconfig('ui', 'allowemptycommit', True)
        return repo.commit(*args, **kwargs)
    finally:
        # restore in reverse order of the backups above
        repo.ui.restoreconfig(savedallowempty)
        if overridephase:
            repo.ui.restoreconfig(savedphase)
417 418
class AbortNoCleanup(error.Abort):
    # Abort that must NOT roll back work already done: queue.apply()
    # closes the transaction and saves dirty state before re-raising it,
    # instead of aborting the transaction.
    pass
420 421
421 422 class queue(object):
    def __init__(self, ui, baseui, path, patchdir=None):
        """Initialize a patch queue rooted at path.

        path is the .hg directory holding the queue; patchdir, when given,
        overrides the patch directory selected by the patches.queue file.
        """
        self.basepath = path
        # patches.queue names the active queue; an empty or missing file
        # selects the default 'patches' directory
        try:
            fh = open(os.path.join(path, 'patches.queue'))
            cur = fh.read().rstrip()
            fh.close()
            if not cur:
                curpath = os.path.join(path, 'patches')
            else:
                curpath = os.path.join(path, 'patches-' + cur)
        except IOError:
            curpath = os.path.join(path, 'patches')
        self.path = patchdir or curpath
        self.opener = scmutil.opener(self.path)
        self.ui = ui
        self.baseui = baseui
        self.applieddirty = False
        self.seriesdirty = False
        self.added = []
        self.seriespath = "series"
        self.statuspath = "status"
        self.guardspath = "guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
        try:
            gitmode = ui.configbool('mq', 'git', None)
            if gitmode is None:
                raise error.ConfigError
            if gitmode:
                self.gitmode = 'yes'
            else:
                self.gitmode = 'no'
        except error.ConfigError:
            # let's have check-config ignore the type mismatch
            self.gitmode = ui.config(r'mq', 'git', 'auto').lower()
        # deprecated config: mq.plain
        self.plainmode = ui.configbool('mq', 'plain', False)
        self.checkapplied = True
461 462
462 463 @util.propertycache
463 464 def applied(self):
464 465 def parselines(lines):
465 466 for l in lines:
466 467 entry = l.split(':', 1)
467 468 if len(entry) > 1:
468 469 n, name = entry
469 470 yield statusentry(bin(n), name)
470 471 elif l.strip():
471 472 self.ui.warn(_('malformated mq status line: %s\n') % entry)
472 473 # else we ignore empty lines
473 474 try:
474 475 lines = self.opener.read(self.statuspath).splitlines()
475 476 return list(parselines(lines))
476 477 except IOError as e:
477 478 if e.errno == errno.ENOENT:
478 479 return []
479 480 raise
480 481
481 482 @util.propertycache
482 483 def fullseries(self):
483 484 try:
484 485 return self.opener.read(self.seriespath).splitlines()
485 486 except IOError as e:
486 487 if e.errno == errno.ENOENT:
487 488 return []
488 489 raise
489 490
    @util.propertycache
    def series(self):
        # parseseries() assigns self.series on the instance, so the
        # attribute read below (and future lookups) sees the parsed list
        # rather than re-entering this property
        self.parseseries()
        return self.series
494 495
    @util.propertycache
    def seriesguards(self):
        # parseseries() assigns self.seriesguards on the instance, so the
        # attribute read below sees the parsed guard lists
        self.parseseries()
        return self.seriesguards
499 500
500 501 def invalidate(self):
501 502 for a in 'applied fullseries series seriesguards'.split():
502 503 if a in self.__dict__:
503 504 delattr(self, a)
504 505 self.applieddirty = False
505 506 self.seriesdirty = False
506 507 self.guardsdirty = False
507 508 self.activeguards = None
508 509
509 510 def diffopts(self, opts=None, patchfn=None):
510 511 diffopts = patchmod.diffopts(self.ui, opts)
511 512 if self.gitmode == 'auto':
512 513 diffopts.upgrade = True
513 514 elif self.gitmode == 'keep':
514 515 pass
515 516 elif self.gitmode in ('yes', 'no'):
516 517 diffopts.git = self.gitmode == 'yes'
517 518 else:
518 519 raise error.Abort(_('mq.git option can be auto/keep/yes/no'
519 520 ' got %s') % self.gitmode)
520 521 if patchfn:
521 522 diffopts = self.patchopts(diffopts, patchfn)
522 523 return diffopts
523 524
524 525 def patchopts(self, diffopts, *patches):
525 526 """Return a copy of input diff options with git set to true if
526 527 referenced patch is a git patch and should be preserved as such.
527 528 """
528 529 diffopts = diffopts.copy()
529 530 if not diffopts.git and self.gitmode == 'keep':
530 531 for patchfn in patches:
531 532 patchf = self.opener(patchfn, 'r')
532 533 # if the patch was a git patch, refresh it as a git patch
533 534 for line in patchf:
534 535 if line.startswith('diff --git'):
535 536 diffopts.git = True
536 537 break
537 538 patchf.close()
538 539 return diffopts
539 540
    def join(self, *p):
        # resolve path components relative to the patch directory
        return os.path.join(self.path, *p)
542 543
543 544 def findseries(self, patch):
544 545 def matchpatch(l):
545 546 l = l.split('#', 1)[0]
546 547 return l.strip() == patch
547 548 for index, l in enumerate(self.fullseries):
548 549 if matchpatch(l):
549 550 return index
550 551 return None
551 552
    # matches a guard annotation after a series entry: optional space, '#',
    # then '+' or '-' followed by a guard name (no '#', whitespace, or a
    # leading second '+'/'-')
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
553 554
554 555 def parseseries(self):
555 556 self.series = []
556 557 self.seriesguards = []
557 558 for l in self.fullseries:
558 559 h = l.find('#')
559 560 if h == -1:
560 561 patch = l
561 562 comment = ''
562 563 elif h == 0:
563 564 continue
564 565 else:
565 566 patch = l[:h]
566 567 comment = l[h:]
567 568 patch = patch.strip()
568 569 if patch:
569 570 if patch in self.series:
570 571 raise error.Abort(_('%s appears more than once in %s') %
571 572 (patch, self.join(self.seriespath)))
572 573 self.series.append(patch)
573 574 self.seriesguards.append(self.guard_re.findall(comment))
574 575
575 576 def checkguard(self, guard):
576 577 if not guard:
577 578 return _('guard cannot be an empty string')
578 579 bad_chars = '# \t\r\n\f'
579 580 first = guard[0]
580 581 if first in '-+':
581 582 return (_('guard %r starts with invalid character: %r') %
582 583 (guard, first))
583 584 for c in bad_chars:
584 585 if c in guard:
585 586 return _('invalid character in guard %r: %r') % (guard, c)
586 587
587 588 def setactive(self, guards):
588 589 for guard in guards:
589 590 bad = self.checkguard(guard)
590 591 if bad:
591 592 raise error.Abort(bad)
592 593 guards = sorted(set(guards))
593 594 self.ui.debug('active guards: %s\n' % ' '.join(guards))
594 595 self.activeguards = guards
595 596 self.guardsdirty = True
596 597
597 598 def active(self):
598 599 if self.activeguards is None:
599 600 self.activeguards = []
600 601 try:
601 602 guards = self.opener.read(self.guardspath).split()
602 603 except IOError as err:
603 604 if err.errno != errno.ENOENT:
604 605 raise
605 606 guards = []
606 607 for i, guard in enumerate(guards):
607 608 bad = self.checkguard(guard)
608 609 if bad:
609 610 self.ui.warn('%s:%d: %s\n' %
610 611 (self.join(self.guardspath), i + 1, bad))
611 612 else:
612 613 self.activeguards.append(guard)
613 614 return self.activeguards
614 615
615 616 def setguards(self, idx, guards):
616 617 for g in guards:
617 618 if len(g) < 2:
618 619 raise error.Abort(_('guard %r too short') % g)
619 620 if g[0] not in '-+':
620 621 raise error.Abort(_('guard %r starts with invalid char') % g)
621 622 bad = self.checkguard(g[1:])
622 623 if bad:
623 624 raise error.Abort(bad)
624 625 drop = self.guard_re.sub('', self.fullseries[idx])
625 626 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
626 627 self.parseseries()
627 628 self.seriesdirty = True
628 629
629 630 def pushable(self, idx):
630 631 if isinstance(idx, str):
631 632 idx = self.series.index(idx)
632 633 patchguards = self.seriesguards[idx]
633 634 if not patchguards:
634 635 return True, None
635 636 guards = self.active()
636 637 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
637 638 if exactneg:
638 639 return False, repr(exactneg[0])
639 640 pos = [g for g in patchguards if g[0] == '+']
640 641 exactpos = [g for g in pos if g[1:] in guards]
641 642 if pos:
642 643 if exactpos:
643 644 return True, repr(exactpos[0])
644 645 return False, ' '.join(map(repr, pos))
645 646 return True, ''
646 647
647 648 def explainpushable(self, idx, all_patches=False):
648 649 if all_patches:
649 650 write = self.ui.write
650 651 else:
651 652 write = self.ui.warn
652 653
653 654 if all_patches or self.ui.verbose:
654 655 if isinstance(idx, str):
655 656 idx = self.series.index(idx)
656 657 pushable, why = self.pushable(idx)
657 658 if all_patches and pushable:
658 659 if why is None:
659 660 write(_('allowing %s - no guards in effect\n') %
660 661 self.series[idx])
661 662 else:
662 663 if not why:
663 664 write(_('allowing %s - no matching negative guards\n') %
664 665 self.series[idx])
665 666 else:
666 667 write(_('allowing %s - guarded by %s\n') %
667 668 (self.series[idx], why))
668 669 if not pushable:
669 670 if why:
670 671 write(_('skipping %s - guarded by %s\n') %
671 672 (self.series[idx], why))
672 673 else:
673 674 write(_('skipping %s - no matching guards\n') %
674 675 self.series[idx])
675 676
676 677 def savedirty(self):
677 678 def writelist(items, path):
678 679 fp = self.opener(path, 'w')
679 680 for i in items:
680 681 fp.write("%s\n" % i)
681 682 fp.close()
682 683 if self.applieddirty:
683 684 writelist(map(str, self.applied), self.statuspath)
684 685 self.applieddirty = False
685 686 if self.seriesdirty:
686 687 writelist(self.fullseries, self.seriespath)
687 688 self.seriesdirty = False
688 689 if self.guardsdirty:
689 690 writelist(self.activeguards, self.guardspath)
690 691 self.guardsdirty = False
691 692 if self.added:
692 693 qrepo = self.qrepo()
693 694 if qrepo:
694 695 qrepo[None].add(f for f in self.added if f not in qrepo[None])
695 696 self.added = []
696 697
697 698 def removeundo(self, repo):
698 699 undo = repo.sjoin('undo')
699 700 if not os.path.exists(undo):
700 701 return
701 702 try:
702 703 os.unlink(undo)
703 704 except OSError as inst:
704 705 self.ui.warn(_('error removing undo: %s\n') % str(inst))
705 706
706 707 def backup(self, repo, files, copy=False):
707 708 # backup local changes in --force case
708 709 for f in sorted(files):
709 710 absf = repo.wjoin(f)
710 711 if os.path.lexists(absf):
711 712 self.ui.note(_('saving current version of %s as %s\n') %
712 713 (f, scmutil.origpath(self.ui, repo, f)))
713 714
714 715 absorig = scmutil.origpath(self.ui, repo, absf)
715 716 if copy:
716 717 util.copyfile(absf, absorig)
717 718 else:
718 719 util.rename(absf, absorig)
719 720
720 721 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
721 722 fp=None, changes=None, opts={}):
722 723 stat = opts.get('stat')
723 724 m = scmutil.match(repo[node1], files, opts)
724 725 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
725 726 changes, stat, fp)
726 727
    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        """Apply one patch on top of head, falling back to a real merge.

        On apply failure the partially applied revision is stripped, rev
        is merged in, the result committed, and the patch file rewritten
        from the merge result.  Returns (0, newnode) on success.
        """
        # first try just applying the patch
        (err, n) = self.apply(repo, [patch], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise error.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        strip(self.ui, repo, [n], update=False, backup=False)

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise error.Abort(_("update returned %d") % ret)
        # commit the merge under the original description and user
        n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
        if n is None:
            raise error.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except Exception:
            raise error.Abort(_("unable to read %s") % patch)

        # rewrite the patch file with the merged diff, keeping its header
        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
765 766
766 767 def qparents(self, repo, rev=None):
767 768 """return the mq handled parent or p1
768 769
769 770 In some case where mq get himself in being the parent of a merge the
770 771 appropriate parent may be p2.
771 772 (eg: an in progress merge started with mq disabled)
772 773
773 774 If no parent are managed by mq, p1 is returned.
774 775 """
775 776 if rev is None:
776 777 (p1, p2) = repo.dirstate.parents()
777 778 if p2 == nullid:
778 779 return p1
779 780 if not self.applied:
780 781 return None
781 782 return self.applied[-1].node
782 783 p1, p2 = repo.changelog.parents(rev)
783 784 if p2 != nullid and p2 in [x.node for x in self.applied]:
784 785 return p2
785 786 return p1
786 787
    def mergepatch(self, repo, mergeq, series, diffopts):
        """Merge the named patches from mergeq into this queue.

        Returns (err, head) where err is 0 on success, 1 when a patch is
        missing or unapplied in mergeq, or the error from mergeone().
        """
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = newcommit(repo, None, '[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = True

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = True
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)
825 826
826 827 def patch(self, repo, patchfile):
827 828 '''Apply patchfile to the working directory.
828 829 patchfile: name of patch file'''
829 830 files = set()
830 831 try:
831 832 fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
832 833 files=files, eolmode=None)
833 834 return (True, list(files), fuzz)
834 835 except Exception as inst:
835 836 self.ui.note(str(inst) + '\n')
836 837 if not self.ui.verbose:
837 838 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
838 839 self.ui.traceback()
839 840 return (False, list(files), False)
840 841
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None,
              tobackup=None, keepchanges=False):
        """Apply the patches in series under wlock/lock/transaction.

        Thin transactional wrapper around _apply(): on success or
        AbortNoCleanup the transaction is closed and dirty queue state
        saved; any other exception aborts the transaction and invalidates
        cached state.  Returns _apply()'s (error, hash) tuple.
        """
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files,
                                  tobackup=tobackup, keepchanges=keepchanges)
                tr.close()
                self.savedirty()
                return ret
            except AbortNoCleanup:
                # keep the work done so far: commit the transaction and
                # persist state before propagating
                tr.close()
                self.savedirty()
                raise
            except: # re-raises
                try:
                    tr.abort()
                finally:
                    self.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)
869 870
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None,
               tobackup=None, keepchanges=False):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    # back up local changes the patch is about to touch;
                    # with keepchanges that situation is an error instead
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _("conflicting local changes found"),
                            hint=_("did you forget to qrefresh?"))
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                # invert: patcherr becomes "patch failed" flag
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                repo.dirstate.beginparentchange()
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.setparents(p1, merge)
                repo.dirstate.endparentchange()

            if all_files and '.hgsubstate' in all_files:
                # subrepo state was touched: merge subrepos as well
                wctx = repo[None]
                pctx = repo['.']
                overwrite = False
                mergedsubstate = subrepo.submerge(repo, pctx, wctx, wctx,
                    overwrite)
                files += mergedsubstate.keys()

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo['tip']
            n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                          force=True)
            if repo['tip'] == oldtip:
                raise error.Abort(_("qpush exactly duplicates child changeset"))
            if n is None:
                raise error.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                # note: the commit above still happened; rejects are left
                # in the working directory
                self.ui.warn(_("patch failed, rejects left in working "
                               "directory\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
976 977
    def _cleanup(self, patches, numrevs, keep=False):
        """Remove finished/deleted patches from mq's bookkeeping.

        patches is a list of series names to drop; numrevs is how many
        leading entries of self.applied correspond to them. Unless keep
        is true, the patch files are also forgotten from the queue repo
        and unlinked from disk. Returns the changeset nodes of the
        entries removed from self.applied.
        """
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                try:
                    os.unlink(self.join(p))
                except OSError as inst:
                    # a missing patch file is fine; anything else is real
                    if inst.errno != errno.ENOENT:
                        raise

        qfinished = []
        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True

        unknown = []

        # delete in descending index order so earlier indexes stay
        # valid while fullseries is mutated
        for (i, p) in sorted([(self.findseries(p), p) for p in patches],
                             reverse=True):
            if i is not None:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                rev = dict((entry.name, entry.node) for entry in qfinished)
                for p in unknown:
                    msg = _('revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _('unknown patches: %s\n')
                raise error.Abort(''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]
1017 1018
1018 1019 def _revpatches(self, repo, revs):
1019 1020 firstrev = repo[self.applied[0].node].rev()
1020 1021 patches = []
1021 1022 for i, rev in enumerate(revs):
1022 1023
1023 1024 if rev < firstrev:
1024 1025 raise error.Abort(_('revision %d is not managed') % rev)
1025 1026
1026 1027 ctx = repo[rev]
1027 1028 base = self.applied[i].node
1028 1029 if ctx.node() != base:
1029 1030 msg = _('cannot delete revision %d above applied patches')
1030 1031 raise error.Abort(msg % rev)
1031 1032
1032 1033 patch = self.applied[i].name
1033 1034 for fmt in ('[mq]: %s', 'imported patch %s'):
1034 1035 if ctx.description() == fmt % patch:
1035 1036 msg = _('patch %s finalized without changeset message\n')
1036 1037 repo.ui.status(msg % patch)
1037 1038 break
1038 1039
1039 1040 patches.append(patch)
1040 1041 return patches
1041 1042
1042 1043 def finish(self, repo, revs):
1043 1044 # Manually trigger phase computation to ensure phasedefaults is
1044 1045 # executed before we remove the patches.
1045 1046 repo._phasecache
1046 1047 patches = self._revpatches(repo, sorted(revs))
1047 1048 qfinished = self._cleanup(patches, len(patches))
1048 1049 if qfinished and repo.ui.configbool('mq', 'secret', False):
1049 1050 # only use this logic when the secret option is added
1050 1051 oldqbase = repo[qfinished[0]]
1051 1052 tphase = repo.ui.config('phases', 'new-commit', phases.draft)
1052 1053 if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
1053 1054 with repo.transaction('qfinish') as tr:
1054 1055 phases.advanceboundary(repo, tr, tphase, qfinished)
1055 1056
1056 1057 def delete(self, repo, patches, opts):
1057 1058 if not patches and not opts.get('rev'):
1058 1059 raise error.Abort(_('qdelete requires at least one revision or '
1059 1060 'patch name'))
1060 1061
1061 1062 realpatches = []
1062 1063 for patch in patches:
1063 1064 patch = self.lookup(patch, strict=True)
1064 1065 info = self.isapplied(patch)
1065 1066 if info:
1066 1067 raise error.Abort(_("cannot delete applied patch %s") % patch)
1067 1068 if patch not in self.series:
1068 1069 raise error.Abort(_("patch %s not in series file") % patch)
1069 1070 if patch not in realpatches:
1070 1071 realpatches.append(patch)
1071 1072
1072 1073 numrevs = 0
1073 1074 if opts.get('rev'):
1074 1075 if not self.applied:
1075 1076 raise error.Abort(_('no patches applied'))
1076 1077 revs = scmutil.revrange(repo, opts.get('rev'))
1077 1078 revs.sort()
1078 1079 revpatches = self._revpatches(repo, revs)
1079 1080 realpatches += revpatches
1080 1081 numrevs = len(revpatches)
1081 1082
1082 1083 self._cleanup(realpatches, numrevs, opts.get('keep'))
1083 1084
1084 1085 def checktoppatch(self, repo):
1085 1086 '''check that working directory is at qtip'''
1086 1087 if self.applied:
1087 1088 top = self.applied[-1].node
1088 1089 patch = self.applied[-1].name
1089 1090 if repo.dirstate.p1() != top:
1090 1091 raise error.Abort(_("working directory revision is not qtip"))
1091 1092 return top, patch
1092 1093 return None, None
1093 1094
1094 1095 def putsubstate2changes(self, substatestate, changes):
1095 1096 for files in changes[:3]:
1096 1097 if '.hgsubstate' in files:
1097 1098 return # already listed up
1098 1099 # not yet listed up
1099 1100 if substatestate in 'a?':
1100 1101 changes[1].append('.hgsubstate')
1101 1102 elif substatestate in 'r':
1102 1103 changes[2].append('.hgsubstate')
1103 1104 else: # modified
1104 1105 changes[0].append('.hgsubstate')
1105 1106
1106 1107 def checklocalchanges(self, repo, force=False, refresh=True):
1107 1108 excsuffix = ''
1108 1109 if refresh:
1109 1110 excsuffix = ', qrefresh first'
1110 1111 # plain versions for i18n tool to detect them
1111 1112 _("local changes found, qrefresh first")
1112 1113 _("local changed subrepos found, qrefresh first")
1113 1114 return checklocalchanges(repo, force, excsuffix)
1114 1115
1115 1116 _reserved = ('series', 'status', 'guards', '.', '..')
1116 1117 def checkreservedname(self, name):
1117 1118 if name in self._reserved:
1118 1119 raise error.Abort(_('"%s" cannot be used as the name of a patch')
1119 1120 % name)
1120 1121 for prefix in ('.hg', '.mq'):
1121 1122 if name.startswith(prefix):
1122 1123 raise error.Abort(_('patch name cannot begin with "%s"')
1123 1124 % prefix)
1124 1125 for c in ('#', ':', '\r', '\n'):
1125 1126 if c in name:
1126 1127 raise error.Abort(_('%r cannot be used in the name of a patch')
1127 1128 % c)
1128 1129
1129 1130 def checkpatchname(self, name, force=False):
1130 1131 self.checkreservedname(name)
1131 1132 if not force and os.path.exists(self.join(name)):
1132 1133 if os.path.isdir(self.join(name)):
1133 1134 raise error.Abort(_('"%s" already exists as a directory')
1134 1135 % name)
1135 1136 else:
1136 1137 raise error.Abort(_('patch "%s" already exists') % name)
1137 1138
1138 1139 def makepatchname(self, title, fallbackname):
1139 1140 """Return a suitable filename for title, adding a suffix to make
1140 1141 it unique in the existing list"""
1141 1142 namebase = re.sub('[\s\W_]+', '_', title.lower()).strip('_')
1142 1143 namebase = namebase[:75] # avoid too long name (issue5117)
1143 1144 if namebase:
1144 1145 try:
1145 1146 self.checkreservedname(namebase)
1146 1147 except error.Abort:
1147 1148 namebase = fallbackname
1148 1149 else:
1149 1150 namebase = fallbackname
1150 1151 name = namebase
1151 1152 i = 0
1152 1153 while True:
1153 1154 if name not in self.fullseries:
1154 1155 try:
1155 1156 self.checkpatchname(name)
1156 1157 break
1157 1158 except error.Abort:
1158 1159 pass
1159 1160 i += 1
1160 1161 name = '%s__%s' % (namebase, i)
1161 1162 return name
1162 1163
1163 1164 def checkkeepchanges(self, keepchanges, force):
1164 1165 if force and keepchanges:
1165 1166 raise error.Abort(_('cannot use both --force and --keep-changes'))
1166 1167
    def new(self, repo, patchfn, *pats, **opts):
        """Start a new patch (qnew): commit outstanding changes under
        patchfn and record it as the new top of the queue.

        options:
        msg: a string or a no-argument function returning a string
        """
        msg = opts.get('msg')
        edit = opts.get('edit')
        editform = opts.get('editform', 'mq.qnew')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        diffopts = self.diffopts({'git': opts.get('git')})
        if opts.get('checkname', True):
            self.checkpatchname(patchfn)
        inclsubs = checksubstate(repo)
        if inclsubs:
            substatestate = repo.dirstate['.hgsubstate']
        if opts.get('include') or opts.get('exclude') or pats:
            # detect missing files in pats
            def badfn(f, msg):
                if f != '.hgsubstate': # .hgsubstate is auto-created
                    raise error.Abort('%s: %s' % (f, msg))
            match = scmutil.match(repo[None], pats, opts, badfn=badfn)
            changes = repo.status(match=match)
        else:
            # no explicit file selection: take all local changes
            changes = self.checklocalchanges(repo, force=True)
        commitfiles = list(inclsubs)
        for files in changes[:3]:
            commitfiles.extend(files)
        match = scmutil.matchfiles(repo, commitfiles)
        if len(repo[None].parents()) > 1:
            raise error.Abort(_('cannot manage merge changesets'))
        self.checktoppatch(repo)
        insert = self.fullseriesend()
        with repo.wlock():
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, "w")
            except IOError as e:
                raise error.Abort(_('cannot write patch "%s": %s')
                                  % (patchfn, e.strerror))
            try:
                defaultmsg = "[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:
                    def finishdesc(desc):
                        if desc.rstrip():
                            return desc
                        else:
                            return defaultmsg
                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _('Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
                                                     extramsg=extramsg,
                                                     editform=editform)
                    commitmsg = msg
                else:
                    commitmsg = msg or defaultmsg

                n = newcommit(repo, None, commitmsg, user, date, match=match,
                              force=True, editor=editor)
                if n is None:
                    raise error.Abort(_("repo commit failed"))
                try:
                    # commit succeeded: record the patch in series/status
                    # and write the patch file itself
                    self.fullseries[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parseseries()
                    self.seriesdirty = True
                    self.applieddirty = True
                    nctx = repo[n]
                    ph = patchheader(self.join(patchfn), self.plainmode)
                    if user:
                        ph.setuser(user)
                    if date:
                        ph.setdate('%s %s' % date)
                    ph.setparent(hex(nctx.p1().node()))
                    msg = nctx.description().strip()
                    if msg == defaultmsg.strip():
                        # the auto-generated message is not stored in the
                        # patch header
                        msg = ''
                    ph.setmessage(msg)
                    p.write(str(ph))
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        if inclsubs:
                            self.putsubstate2changes(substatestate, changes)
                        chunks = patchmod.diff(repo, node1=parent, node2=n,
                                               changes=changes, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except: # re-raises
                    # undo the commit created above before propagating
                    repo.rollback()
                    raise
            except Exception:
                # any failure: remove the half-written patch file
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except OSError:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
1271 1272
1272 1273 def isapplied(self, patch):
1273 1274 """returns (index, rev, patch)"""
1274 1275 for i, a in enumerate(self.applied):
1275 1276 if a.name == patch:
1276 1277 return (i, a.node, a.name)
1277 1278 return None
1278 1279
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number (as string) to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve a user-supplied patch identifier to a series name.

        Accepts an exact series name, a numeric series index, a unique
        substring (plus the 'qtip'/'qbase' aliases), or name-N/name+N
        offsets. Aborts when nothing matches; strict disables the fuzzy
        forms (#2 and #3 above).
        """
        def partialname(s):
            # exact name wins, then a unique substring, then aliases
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn('  %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == 'qtip':
                    return self.series[self.seriesend(True) - 1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                # negative indexes count from the end, Python style
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

        if not strict:
            res = partialname(patch)
            if res:
                return res
            # name-N: N patches before the named one in the series
            minus = patch.rfind('-')
            if minus >= 0:
                res = partialname(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            # name+N: N patches after the named one in the series
            plus = patch.rfind('+')
            if plus >= 0:
                res = partialname(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
        raise error.Abort(_("patch %s not in series") % patch)
1345 1346
    def push(self, repo, patch=None, force=False, list=False, mergeq=None,
             all=False, move=False, exact=False, nobackup=False,
             keepchanges=False):
        """Apply the next patch(es) in the series (qpush).

        patch names the patch to push up to (all pushes everything);
        NOTE: the 'list' parameter shadows the builtin and is forwarded
        to self.apply unchanged. Returns 0 on success/no-op, 1 or higher
        on error (mirroring the command's exit code).
        """
        self.checkkeepchanges(keepchanges, force)
        diffopts = self.diffopts()
        with repo.wlock():
            heads = []
            for hs in repo.branchmap().itervalues():
                heads.extend(hs)
            if not heads:
                heads = [nullid]
            if repo.dirstate.p1() not in heads and not exact:
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise error.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                else:
                    if reason:
                        reason = _('guarded by %s') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.seriesend()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force and not keepchanges:
                self.checklocalchanges(repo, refresh=self.applied)

            if exact:
                # --exact: update to the patch's recorded parent first
                if keepchanges:
                    raise error.Abort(
                        _("cannot use --exact and --keep-changes together"))
                if move:
                    raise error.Abort(_('cannot use --exact and --move '
                                        'together'))
                if self.applied:
                    raise error.Abort(_('cannot push --exact with applied '
                                        'patches'))
                root = self.series[start]
                target = patchheader(self.join(root), self.plainmode).parent
                if not target:
                    raise error.Abort(
                        _("%s does not have a parent recorded") % root)
                if not repo[target] == repo['.']:
                    hg.update(repo, target)

            if move:
                # --move: reorder fullseries so the requested patch is
                # the next unapplied entry
                if not patch:
                    raise error.Abort(_("please specify the patch to move"))
                for fullstart, rpn in enumerate(self.fullseries):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                        break
                for i, rpn in enumerate(self.fullseries[fullstart:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = fullstart + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(fullstart, fullpatch)
                self.parseseries()
                self.seriesdirty = True

            self.applieddirty = True
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            tobackup = set()
            if (not nobackup and force) or keepchanges:
                status = self.checklocalchanges(repo, force=True)
                if keepchanges:
                    tobackup.update(status.modified + status.added +
                                    status.removed + status.deleted)
                else:
                    tobackup.update(status.modified + status.added)

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files,
                                     tobackup=tobackup, keepchanges=keepchanges)
            except AbortNoCleanup:
                raise
            except: # re-raises
                self.ui.warn(_('cleaning up working directory...\n'))
                cmdutil.revert(self.ui, repo, repo['.'],
                               repo.dirstate.parents(), no_backup=True)
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and qrefresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]
1492 1493
    def pop(self, repo, patch=None, force=False, update=True, all=False,
            nobackup=False, keepchanges=False):
        """Unapply patches down to (and excluding) patch (qpop).

        all pops everything; update controls whether the working
        directory is synchronized with the new qtip (it is forced on
        when the dirstate parent is one of the popped changesets).
        """
        self.checkkeepchanges(keepchanges, force)
        with repo.wlock():
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise error.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # even with --no-update, the dirstate must be fixed up
                # when its parent is about to be stripped
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # skip the working-copy update when no popped changeset
                # is a parent of the working directory
                parents = [p.node() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.node in parents:
                        needupdate = True
                        break
                update = needupdate

            tobackup = set()
            if update:
                s = self.checklocalchanges(repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(s.modified + s.added)
                elif keepchanges:
                    tobackup.update(s.modified + s.added +
                                    s.removed + s.deleted)

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise error.Abort(_('trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise error.Abort(_("popping would remove a revision not "
                                    "managed by this patch queue"))
            if not repo[self.applied[-1].node].mutable():
                raise error.Abort(
                    _("popping would remove a public revision"),
                    hint=_("see 'hg help phases' for details"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, '.')[:4]
                if d:
                    raise error.Abort(_("deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    raise error.Abort(_("local changes found, qrefresh first"))
                self.backup(repo, tobackup)
                repo.dirstate.beginparentchange()
                for f in a:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                    repo.dirstate.drop(f)
                for f in m + r:
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.normal(f)
                repo.setparents(qp, nullid)
                repo.dirstate.endparentchange()
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            strip(self.ui, repo, [rev], update=False, backup=False)
            # refresh subrepos to match the new working directory parent
            for s, state in repo['.'].substate.items():
                repo['.'].sub(s).get(state)
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
1600 1601
1601 1602 def diff(self, repo, pats, opts):
1602 1603 top, patch = self.checktoppatch(repo)
1603 1604 if not top:
1604 1605 self.ui.write(_("no patches applied\n"))
1605 1606 return
1606 1607 qp = self.qparents(repo, top)
1607 1608 if opts.get('reverse'):
1608 1609 node1, node2 = None, qp
1609 1610 else:
1610 1611 node1, node2 = qp, None
1611 1612 diffopts = self.diffopts(opts, patch)
1612 1613 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1613 1614
    def refresh(self, repo, pats=None, **opts):
        """Fold the current working-directory changes into the top patch
        (qrefresh): strip the qtip commit, recommit it with the updated
        content, and rewrite the patch file.

        Returns 1 when no patches are applied. Recognized opts include
        msg/edit/editform, user, date, git, short, include/exclude.
        """
        if not self.applied:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        edit = opts.get('edit')
        editform = opts.get('editform', 'mq.qrefresh')
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise error.Abort(_("cannot qrefresh a revision with children"))
            if not repo[top].mutable():
                raise error.Abort(_("cannot qrefresh public revision"),
                                  hint=_("see 'hg help phases' for details"))

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)

            inclsubs = checksubstate(repo, hex(patchparent))
            if inclsubs:
                substatestate = repo.dirstate['.hgsubstate']

            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa = repo.status(top, patchparent)[:3]
            # but we do it backwards to take advantage of manifest/changelog
            # caching against the next repo.status call
            mm, aa, dd = repo.status(patchparent, top)[:3]
            changes = repo.changelog.read(top)
            man = repo.manifestlog[changes[0]].read()
            aaa = aa[:]
            matchfn = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                # filter with include/exclude options
                matchfn = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)

            # create 'match' that includes the files to be recommitted.
            # apply matchfn via repo.status to ensure correct case handling.
            cm, ca, cr, cd = repo.status(patchparent, match=matchfn)[:4]
            allmatches = set(cm + ca + cr + cd)
            refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]

            files = set(inclsubs)
            for x in refreshchanges:
                files.update(x)
            match = scmutil.matchfiles(repo, files)

            bmlist = repo[top].bookmarks()

            dsguard = None
            try:
                dsguard = dirstateguard.dirstateguard(repo, 'mq.refresh')
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m) - 1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.drop(f)

                user = ph.user or changes[1]

                oldphase = repo[top].phase()

                # assumes strip can roll itself back if interrupted
                repo.setparents(*cparents)
                self.applied.pop()
                self.applieddirty = True
                strip(self.ui, repo, [top], update=False, backup=False)
                dsguard.close()
            finally:
                release(dsguard)

            try:
                # might be nice to attempt to roll back strip after this

                defaultmsg = "[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:
                    def finishdesc(desc):
                        if desc.rstrip():
                            ph.setmessage(desc)
                            return desc
                        return defaultmsg
                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _('Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
                                                     extramsg=extramsg,
                                                     editform=editform)
                    message = msg or "\n".join(ph.message)
                elif not msg:
                    if not ph.message:
                        message = defaultmsg
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg
                    ph.setmessage(msg)

                # Ensure we create a new changeset in the same phase than
                # the old one.
                lock = tr = None
                try:
                    lock = repo.lock()
                    tr = repo.transaction('mq')
                    n = newcommit(repo, oldphase, message, user, ph.date,
                                  match=match, force=True, editor=editor)
                    # only write patch after a successful commit
                    c = [list(x) for x in refreshchanges]
                    if inclsubs:
                        self.putsubstate2changes(substatestate, c)
                    chunks = patchmod.diff(repo, patchparent,
                                           changes=c, opts=diffopts)
                    comments = str(ph)
                    if comments:
                        patchf.write(comments)
                    for chunk in chunks:
                        patchf.write(chunk)
                    patchf.close()

                    marks = repo._bookmarks
                    for bm in bmlist:
                        marks[bm] = n
                    marks.recordchange(tr)
                    tr.close()

                    self.applied.append(statusentry(n, patchfn))
                finally:
                    lockmod.release(tr, lock)
            except: # re-raises
                # the old qtip is already stripped; restore the dirstate
                # and tell the user how to recover
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(_('qrefresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)
1854 1855
1855 1856 def init(self, repo, create=False):
1856 1857 if not create and os.path.isdir(self.path):
1857 1858 raise error.Abort(_("patch queue directory already exists"))
1858 1859 try:
1859 1860 os.mkdir(self.path)
1860 1861 except OSError as inst:
1861 1862 if inst.errno != errno.EEXIST or not create:
1862 1863 raise
1863 1864 if create:
1864 1865 return self.qrepo(create=True)
1865 1866
1866 1867 def unapplied(self, repo, patch=None):
1867 1868 if patch and patch not in self.series:
1868 1869 raise error.Abort(_("patch %s is not in series file") % patch)
1869 1870 if not patch:
1870 1871 start = self.seriesend()
1871 1872 else:
1872 1873 start = self.series.index(patch) + 1
1873 1874 unapplied = []
1874 1875 for i in xrange(start, len(self.series)):
1875 1876 pushable, reason = self.pushable(i)
1876 1877 if pushable:
1877 1878 unapplied.append((i, self.series[i]))
1878 1879 self.explainpushable(i)
1879 1880 return unapplied
1880 1881
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print the series (or, with missing, stray files in the patch dir).

        start/length select a slice of the series; status ('A'/'U'/'G')
        filters by state in non-verbose mode; summary appends the first
        line of each patch's message.
        """
        def displayname(pfx, patchname, state):
            # render one line: optional prefix, name, and (with summary)
            # the first message line, ellipsized to the terminal width
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                if ph.message:
                    msg = ph.message[0]
                else:
                    msg = ''

                if self.ui.formatted():
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        # no room left on the line for any message text
                        msg = ''
                self.ui.write(patchname, label='qseries.' + state)
                self.ui.write(': ')
                self.ui.write(msg, label='qseries.message.' + state)
            else:
                self.ui.write(patchname, label='qseries.' + state)
            self.ui.write('\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                # width of the largest index we will print, for alignment
                idxwidth = len(str(start + length - 1))
            for i in xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = 'A', 'applied'
                elif self.pushable(i)[0]:
                    char, state = 'U', 'unapplied'
                else:
                    char, state = 'G', 'guarded'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    # status filter only applies in non-verbose mode
                    continue
                displayname(pfx, patch, state)
        else:
            # list files in the patch directory that are not in the series
            # and are not mq bookkeeping files
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.statuspath, self.seriespath,
                                   self.guardspath)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x, 'missing')
1940 1941
1941 1942 def issaveline(self, l):
1942 1943 if l.name == '.hg.patches.save.line':
1943 1944 return True
1944 1945
1945 1946 def qrepo(self, create=False):
1946 1947 ui = self.baseui.copy()
1947 1948 if create or os.path.isdir(self.join(".hg")):
1948 1949 return hg.repository(ui, path=self.path, create=create)
1949 1950
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Restore mq state from a qsave changeset *rev*.

        Parses the description written by save(): optional "Dirstate:"
        line, then a "Patch Data:" section of "node:name" (applied) and
        ":name" (unapplied) lines.  With delete, strip the save entry;
        with qupdate, update the queue repo to the saved parents.
        Returns 1 on failure, None on success.
        """
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                # "Dirstate: <hex> <hex>" -> queue repo parent nodes
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                # applied entries carry a node before the ':'; series-only
                # entries start with ':'
                l = line.rstrip()
                n, name = l.split(':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_("no saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()
        if delete:
            # only strip the save changeset if nothing was committed on top
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                strip(self.ui, repo, [rev], update=update, backup=False)
        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])
2003 2004
    def save(self, repo, msg=None):
        """Commit the current mq state into a qsave changeset.

        Encodes the applied/unapplied series into the commit message in
        the exact format that restore() parses back.  Returns 1 on
        failure, None on success.
        """
        if not self.applied:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            # record queue repo parents so restore can put it back
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        # applied entries serialize as "node:name", series-only as ":name"
        msg += ''.join('%s\n' % x for x in self.applied)
        msg += ''.join(':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)
2030 2031
2031 2032 def fullseriesend(self):
2032 2033 if self.applied:
2033 2034 p = self.applied[-1].name
2034 2035 end = self.findseries(p)
2035 2036 if end is None:
2036 2037 return len(self.fullseries)
2037 2038 return end + 1
2038 2039 return 0
2039 2040
    def seriesend(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
        end = 0
        def nextpatch(start):
            # with all_patches, the raw position is returned; otherwise
            # skip forward over guarded (non-pushable) patches
            if all_patches or start >= len(self.series):
                return start
            for i in xrange(start, len(self.series)):
                p, reason = self.pushable(i)
                if p:
                    return i
                self.explainpushable(i)
            return len(self.series)
        if self.applied:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                # applied patch no longer in the series (e.g. edited
                # series file): treat as nothing applied
                return 0
            return nextpatch(end + 1)
        return nextpatch(end)
2063 2064
2064 2065 def appliedname(self, index):
2065 2066 pname = self.applied[index].name
2066 2067 if not self.ui.verbose:
2067 2068 p = pname
2068 2069 else:
2069 2070 p = str(self.series.index(pname)) + " " + pname
2070 2071 return p
2071 2072
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patch files and/or existing revisions under mq control.

        files: patch file names ('-' for stdin); rev: revisions to convert
        into patches (mutually exclusive with files); existing: register a
        file already in the patch directory; patchname: name for a single
        import; git: use git-style diffs for --rev imports.
        Returns the list of imported patch names.
        """
        def checkseries(patchname):
            if patchname in self.series:
                raise error.Abort(_('patch %s is already in the series file')
                                  % patchname)

        if rev:
            if files:
                raise error.Abort(_('option "-r" not valid when importing '
                                    'files'))
            rev = scmutil.revrange(repo, rev)
            # process from newest to oldest so patches stack correctly
            rev.sort(reverse=True)
        elif not files:
            raise error.Abort(_('no files or revisions specified'))
        # NOTE(review): len(rev) assumes rev is a sized container here; the
        # command caller passes a list, but a direct call with rev=None and
        # a single file plus patchname would raise TypeError -- confirm.
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise error.Abort(_('option "-n" not valid when importing multiple '
                                'patches'))
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev.first()))
            if len(heads) > 1:
                raise error.Abort(_('revision %d is the root of more than one '
                                    'branch') % rev.last())
            if self.applied:
                base = repo.changelog.node(rev.first())
                if base in [n.node for n in self.applied]:
                    raise error.Abort(_('revision %d is already managed')
                                      % rev.first())
                if heads != [self.applied[-1].node]:
                    raise error.Abort(_('revision %d is not the parent of '
                                        'the queue') % rev.first())
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev.first())]:
                    raise error.Abort(_('revision %d has unmanaged children')
                                      % rev.first())
                lastparent = None

            diffopts = self.diffopts({'git': git})
            with repo.transaction('qimport') as tr:
                for r in rev:
                    if not repo[r].mutable():
                        raise error.Abort(_('revision %d is not mutable') % r,
                                          hint=_("see 'hg help phases' "
                                                 'for details'))
                    p1, p2 = repo.changelog.parentrevs(r)
                    n = repo.changelog.node(r)
                    if p2 != nullrev:
                        raise error.Abort(_('cannot import merge revision %d')
                                          % r)
                    # enforce a linear chain: each revision must be the
                    # first parent of the previously processed one
                    if lastparent and lastparent != r:
                        raise error.Abort(_('revision %d is not the parent of '
                                            '%d')
                                          % (r, lastparent))
                    lastparent = p1

                    if not patchname:
                        patchname = self.makepatchname(
                            repo[r].description().split('\n', 1)[0],
                            '%d.diff' % r)
                    checkseries(patchname)
                    self.checkpatchname(patchname, force)
                    self.fullseries.insert(0, patchname)

                    # write the exported revision as a patch file
                    patchf = self.opener(patchname, "w")
                    cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
                    patchf.close()

                    se = statusentry(n, patchname)
                    self.applied.insert(0, se)

                    self.added.append(patchname)
                    imported.append(patchname)
                    patchname = None
                if rev and repo.ui.configbool('mq', 'secret', False):
                    # if we added anything with --rev, move the secret root
                    phases.retractboundary(repo, tr, phases.secret, [n])
            self.parseseries()
            self.applieddirty = True
            self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                # register a file that is already in the patch directory
                if filename == '-':
                    raise error.Abort(_('-e is incompatible with import from -')
                                      )
                filename = normname(filename)
                self.checkreservedname(filename)
                if util.url(filename).islocal():
                    originpath = self.join(filename)
                    if not os.path.isfile(originpath):
                        raise error.Abort(
                            _("patch %s does not exist") % filename)

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(_('renaming %s to %s\n')
                                  % (filename, patchname))
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                # copy patch content from stdin or an external path
                if filename == '-' and not patchname:
                    raise error.Abort(_('need --name to import a patch from -'))
                elif not patchname:
                    patchname = normname(os.path.basename(filename.rstrip('/')))
                self.checkpatchname(patchname, force)
                try:
                    if filename == '-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise error.Abort(_("unable to read file %s") % filename)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                # insert after the last applied patch, shifted by how many
                # files we have already inserted in this loop
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
            self.parseseries()
            self.seriesdirty = True
            self.ui.warn(_("adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported
2212 2213
def fixkeepchangesopts(ui, opts):
    """Inject keep_changes into *opts* when mq.keepchanges is configured.

    --force and --exact opt out of the behavior.  The caller's mapping is
    never mutated; a copy is returned only when the flag is injected.
    """
    inject = (ui.configbool('mq', 'keepchanges')
              and not opts.get('force')
              and not opts.get('exact'))
    if not inject:
        return opts
    newopts = dict(opts)
    newopts['keep_changes'] = True
    return newopts
2220 2221
@command("qdelete|qremove|qrm",
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [],
           _('stop managing a revision (DEPRECATED)'), _('REV'))],
         _('hg qdelete [-k] [PATCH]...'))
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    q = repo.mq
    q.delete(repo, patches, opts)
    # persist the updated series/status files
    q.savedirty()
    return 0
2239 2240
@command("qapplied",
         [('1', 'last', None, _('show only the preceding applied patch'))
          ] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]'))
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq

    if patch:
        if patch not in q.series:
            raise error.Abort(_("patch %s is not in series file") % patch)
        # show everything up to and including the named patch
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    if opts.get('last') and not end:
        ui.write(_("no patches applied\n"))
        return 1
    elif opts.get('last') and end == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    elif opts.get('last'):
        # show only the patch just below the topmost applied one
        start = end - 2
        end = 1
    else:
        start = 0

    q.qseries(repo, length=end, start=start, status='A',
              summary=opts.get('summary'))
2272 2273
2273 2274
@command("qunapplied",
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]'))
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    if patch:
        if patch not in q.series:
            raise error.Abort(_("patch %s is not in series file") % patch)
        # start listing just after the named patch
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if start == len(q.series) and opts.get('first'):
        ui.write(_("all patches applied\n"))
        return 1

    # with --first show one entry, otherwise the rest of the series
    if opts.get('first'):
        length = 1
    else:
        length = None
    q.qseries(repo, start=start, length=length, status='U',
              summary=opts.get('summary'))
2300 2301
@command("qimport",
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '',
           _('name of patch file'), _('NAME')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [],
           _('place existing revisions under mq control'), _('REV')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev . -n patch will place the current revision
    under mq control). With -g/--git, patches imported with --rev will
    use the git diff format. See the diffs help topic for information
    on why this is important for preserving rename/copy information
    and permission changes. Use :hg:`qfinish` to remove changesets
    from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    with repo.lock(): # cause this may move phase
        q = repo.mq
        try:
            imported = q.qimport(
                repo, filename, patchname=opts.get('name'),
                existing=opts.get('existing'), force=opts.get('force'),
                rev=opts.get('rev'), git=opts.get('git'))
        finally:
            # persist series/status even when the import aborted midway
            q.savedirty()

        if imported and opts.get('push') and not opts.get('rev'):
            return q.push(repo, imported[-1])
    return 0
2358 2359
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    # q.init returns a repo object only when a versioned queue was created
    r = q.init(repo, create)
    q.savedirty()
    if r:
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wvfs('.hgignore', 'w')
            fp.write('^\\.hg\n')
            fp.write('^\\.mq\n')
            fp.write('syntax: glob\n')
            fp.write('status\n')
            fp.write('guards\n')
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wvfs('series', 'w').close()
        # schedule the bookkeeping files for the initial commit
        r[None].add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
2384 2385
@command("^qinit",
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]'))
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    # thin wrapper; all the work happens in the module-level qinit helper
    return qinit(ui, repo, create=opts.get('create_repo'))
2400 2401
@command("qclone",
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None,
           _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '',
           _('location of source patch repository'), _('REPO')),
         ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]'),
         norepo=True)
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.peer(ui, opts, ui.expandpath(source))

    # patches repo (source only)
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.peer(ui, opts, patchespath)
    except error.RepoError:
        raise error.Abort(_('versioned patch repository not found'
                            ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        # NOTE(review): qbase is still None here, so repo[qbase] is the
        # working-directory context; this looks like it was meant to check
        # the phase of the first applied patch -- confirm before changing.
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                # clone only the revisions not reachable from qbase, plus
                # qbase's parent, so applied patches stay out of dest
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass

    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, opts, sr.url(), dest,
                      pull=opts.get('pull'),
                      rev=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))

    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))

    if dr.local():
        repo = dr.local()
        if qbase:
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            strip(ui, repo, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())
2487 2488
@command("qcommit|qci",
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...'),
         inferrepo=True)
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        raise error.Abort('no queue repository')
    # run the regular commit command against the nested queue repo
    commands.commit(r.ui, r, *pats, **opts)
2501 2502
@command("qseries",
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]'))
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    # delegate all formatting/filtering to queue.qseries
    repo.mq.qseries(repo, missing=opts.get('missing'),
                    summary=opts.get('summary'))
    return 0
2513 2514
@command("qtop", seriesopts, _('hg qtop [-s]'))
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    if q.applied:
        # index just past the last applied patch
        t = q.seriesend(True)
    else:
        t = 0

    if t:
        q.qseries(repo, start=t - 1, length=1, status='A',
                  summary=opts.get('summary'))
    else:
        ui.write(_("no patches applied\n"))
        return 1
2531 2532
@command("qnext", seriesopts, _('hg qnext [-s]'))
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    # seriesend() skips guarded patches, so this is the next pushable one
    end = q.seriesend()
    if end == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
2543 2544
@command("qprev", seriesopts, _('hg qprev [-s]'))
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    l = len(q.applied)
    if l == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    if not l:
        ui.write(_("no patches applied\n"))
        return 1
    # second-to-last applied patch, located by its series index
    idx = q.series.index(q.applied[-2].name)
    q.qseries(repo, start=idx, length=1, status='A',
              summary=opts.get('summary'))
2560 2561
def setupheaderopts(ui, opts):
    """Fill in opts['user']/opts['date'] from the --current* flags.

    Explicit --user/--date always win; the current user/date are used
    only as fallbacks when -U/--currentuser or -D/--currentdate was
    given.  Mutates *opts* in place.
    """
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
2566 2567
@command("^qnew",
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '',
           _('add "From: <USER>" to patch'), _('USER')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '',
           _('add "Date: <DATE>" to patch'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
         inferrepo=True)
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    msg = cmdutil.logmessage(ui, opts)
    q = repo.mq
    # pass the resolved message through opts for queue.new
    opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.savedirty()
    return 0
2612 2613
@command("^qrefresh",
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user'), _('USER')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
         inferrepo=True)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    # the actual refresh mutates the dirstate, so take the wlock here
    with repo.wlock():
        ret = q.refresh(repo, pats, msg=message, **opts)
        q.savedirty()
        return ret
2657 2658
@command("^qdiff",
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...'),
         inferrepo=True)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # Delegate entirely to the queue object, which writes the combined
    # diff to the ui for us.
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
2679 2680
@command('qfold',
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    q = repo.mq
    if not files:
        raise error.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup('qtip')
    patches = []
    messages = []
    # Resolve every requested patch up front so we abort before touching
    # the working directory if one of them is already applied.
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise error.Abort(_('qfold cannot fold already applied patch %s')
                              % p)
        patches.append(p)

    for p in patches:
        if not message:
            # No explicit -m/-l message: collect each folded patch's
            # header lines so they can be concatenated below.
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_('error folding patch %s') % p)

    if not message:
        # Build the cumulative message: current patch header first, then
        # each folded patch's header, separated by '* * *' lines.
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append('* * *')
                message.extend(msg)
        message = '\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    with repo.wlock():
        q.refresh(repo, msg=message, git=diffopts.git, edit=opts.get('edit'),
                  editform='mq.qfold')
        q.delete(repo, patches, opts)
        q.savedirty()
2746 2747
@command("qgoto",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('overwrite any local changes')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qgoto [OPTION]... PATCH'))
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get('no_backup')
    keepchanges = opts.get('keep_changes')
    # If the target patch is already applied it sits below qtip, so we
    # pop down to it; otherwise we push until it becomes the top.
    moveto = q.pop if q.isapplied(patch) else q.push
    ret = moveto(repo, patch, force=opts.get('force'), nobackup=nobackup,
                 keepchanges=keepchanges)
    q.savedirty()
    return ret
2770 2771
@command("qguard",
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        # Print one series entry with its guards, colorized by whether
        # the patch is applied, pushable, or guarded off.
        guards = q.seriesguards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise error.Abort(_('cannot mix -l/--list with options or '
                               'arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # A first argument starting with '+' or '-' is a guard, not a patch
    # name, so the guards apply to the topmost applied patch.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise error.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_('no patch to work with'))
    if args or opts.get('none'):
        # Setting mode: -n/--none resets to no guards (args is empty).
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_('no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        # Query mode: just print the named patch's guards.
        status(q.series.index(q.lookup(patch)))
2845 2846
@command("qheader", [], _('hg qheader [PATCH]'))
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    # Without an explicit name, fall back to the topmost applied patch.
    if not patch:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write('\n'.join(ph.message) + '\n')
2863 2864
def lastsavename(path):
    """Find the most recent saved-queue backup of *path*.

    Saved queues are siblings of *path* named ``<base>.<N>`` with a
    numeric suffix. Returns a ``(fullpath, index)`` tuple for the
    highest-numbered one, or ``(None, None)`` when none exist.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Escape the base name and match a literal '.' separator; the old
    # pattern interpolated base unescaped and used a bare '.', so e.g.
    # a 'patches-13' qqueue directory was misread as save index 13.
    namere = re.compile(r"%s\.([0-9]+)" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2880 2881
def savename(path):
    """Return the name the next saved-queue backup of *path* should use."""
    # One past the highest existing backup index; 0 when none exist yet.
    last, index = lastsavename(path)
    if last is None:
        index = 0
    return "%s.%d" % (path, index + 1)
2887 2888
@command("^qpush",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('apply on top of local changes')),
          ('e', 'exact', None,
           _('apply the target patch to its recorded parent')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
          ('n', 'name', '',
           _('merge queue name (DEPRECATED)'), _('NAME')),
          ('', 'move', None,
           _('reorder patch series and apply only the patch')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = fixkeepchangesopts(ui, opts)
    if opts.get('merge'):
        # Deprecated merge mode: replay against a previously saved queue,
        # located either via an explicit -n NAME or the newest save.
        if opts.get('name'):
            newpath = repo.join(opts.get('name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
                 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
                 keepchanges=opts.get('keep_changes'))
    return ret
2932 2933
@command("^qpop",
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '',
           _('queue name to pop (DEPRECATED)'), _('NAME')),
          ('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('forget any local changes to patched files')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpop [-a] [-f] [PATCH | INDEX]'))
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    if opts.get('name'):
        # Deprecated: pop from a named (saved) queue instead of the
        # active one; in that case the working dir is left untouched.
        q = queue(ui, repo.baseui, repo.path, repo.join(opts.get('name')))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'), nobackup=opts.get('no_backup'),
                keepchanges=opts.get('keep_changes'))
    q.savedirty()
    return ret
2969 2970
@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    # With a single argument, it is the destination: rename the current
    # (topmost applied) patch.
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    # Renaming into an existing directory moves the patch file into it,
    # keeping its base name.
    if os.path.isdir(absdest):
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    # Rewrite the series entry under the new name, preserving any
    # '#guard' annotations attached to the old entry.
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + ''.join([' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    # If the patch is applied, update the status entry to the new name.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    # If the patch queue is itself a versioned repo, record the rename
    # there too: a freshly-added file is re-added under the new name,
    # anything else becomes a tracked copy.
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        with r.wlock():
            if r.dirstate[patch] == 'a':
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                wctx.copy(patch, name)
                wctx.forget([patch])

    q.savedirty()
3024 3025
@command("qrestore",
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV'))
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    # Resolve the user-supplied revision to a node before restoring.
    node = repo.lookup(rev)
    q.restore(repo, node, delete=opts.get('delete'),
              qupdate=opts.get('update'))
    q.savedirty()
    return 0
3039 3040
@command("qsave",
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '',
           _('copy directory name'), _('NAME')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty() # save to .hg/patches before copying
    if opts.get('copy'):
        # Copy the patch directory aside, either to an explicit -n NAME
        # or to the next auto-numbered save name.
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise error.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts.get('force'):
                    raise error.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        # -e: clear the applied-status file so the queue appears empty.
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0
3077 3078
3078 3079
@command("qselect",
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...'))
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable    (negative guard)
        qguard bar.patch    +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    guards = q.active()
    pushable = lambda i: q.pushable(q.applied[i].name)[0]
    if args or opts.get('none'):
        # Setting mode: record new active guards (empty for --none) and
        # report how pushability of unapplied/applied patches changed.
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts.get('series'):
        # -s/--series: tally how often each guard occurs in the series
        # file ('NONE' counts unguarded patches in verbose mode).
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # Sort by guard name without its leading '+'/'-' sign.
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # Query mode: print the currently active guards.
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    reapply = opts.get('reapply') and q.applied and q.applied[-1].name
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        # Pop down to just below the first applied patch that is now
        # guarded off.
        for i in xrange(len(q.applied)):
            if not pushable(i):
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, q.applied[i - 1].name)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()
3187 3188
@command("qfinish",
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...'))
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    if not opts.get('applied') and not revrange:
        raise error.Abort(_('no revisions specified'))
    elif opts.get('applied'):
        revrange = ('qbase::qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    if repo['.'].rev() in revs and repo[None].files():
        ui.warn(_('warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases, but leaves the responsibility of
    # taking the repo lock to the caller to avoid deadlock with wlock.
    # This command code is responsible for that locking.
    with repo.lock():
        q.finish(repo, revs)
        q.savedirty()
    return 0
3229 3230
@command("qqueue",
         [('l', 'list', False, _('list all available queues')),
          ('', 'active', False, _('print name of active queue')),
          ('c', 'create', False, _('create new queue')),
          ('', 'rename', False, _('rename active queue')),
          ('', 'delete', False, _('delete reference to queue')),
          ('', 'purge', False, _('delete queue, and remove patch dir')),
         ],
         _('[OPTION] [QUEUE]'))
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''
    q = repo.mq
    # Registry layout inside .hg: 'patches.queues' lists all queue
    # names; 'patches.queue' names the active one (empty for the
    # default 'patches' queue).
    _defaultqueue = 'patches'
    _allqueues = 'patches.queues'
    _activequeue = 'patches.queue'

    def _getcurrent():
        # Derive the active queue name from its directory name
        # ('patches' or 'patches-<name>').
        cur = os.path.basename(q.path)
        if cur.startswith('patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        # True when the queue registry file does not exist yet.
        try:
            fh = repo.vfs(_allqueues, 'r')
            fh.close()
        except IOError:
            return True

        return False

    def _getqueues():
        # Read all registered queue names, always including the current
        # one even if it is missing from the registry.
        current = _getcurrent()

        try:
            fh = repo.vfs(_allqueues, 'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    def _setactive(name):
        # Switch the active queue; refuse while patches are applied.
        if q.applied:
            raise error.Abort(_('new queue created, but cannot make active '
                               'as patches are applied'))
        _setactivenocheck(name)

    def _setactivenocheck(name):
        # Record the active queue name (empty file means 'patches').
        fh = repo.vfs(_activequeue, 'w')
        if name != 'patches':
            fh.write(name)
        fh.close()

    def _addqueue(name):
        # Append a queue name to the registry file.
        fh = repo.vfs(_allqueues, 'a')
        fh.write('%s\n' % (name,))
        fh.close()

    def _queuedir(name):
        # Map a queue name to its patch directory.
        if name == 'patches':
            return repo.join('patches')
        else:
            return repo.join('patches-' + name)

    def _validname(name):
        # Queue names may not contain path or guard separators.
        for n in name:
            if n in ':\\/.':
                return False
        return True

    def _delete(name):
        # Drop a queue from the registry by rewriting the registry file
        # without it; the patch directory itself is left alone.
        if name not in existing:
            raise error.Abort(_('cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise error.Abort(_('cannot delete currently active queue'))

        fh = repo.vfs('patches.queues.new', 'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write('%s\n' % (queue,))
        fh.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))

    # No name (or explicit -l/--active): listing modes need no lock.
    if not name or opts.get('list') or opts.get('active'):
        current = _getcurrent()
        if opts.get('active'):
            ui.write('%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write('%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(' (active)\n'))
            else:
                ui.write('\n')
        return

    if not _validname(name):
        raise error.Abort(
                _('invalid queue name, may not contain the characters ":\\/."'))

    with repo.wlock():
        existing = _getqueues()

        if opts.get('create'):
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)
            if _noqueues():
                _addqueue(_defaultqueue)
            _addqueue(name)
            _setactive(name)
        elif opts.get('rename'):
            current = _getcurrent()
            if name == current:
                raise error.Abort(_('can\'t rename "%s" to its current name')
                                  % name)
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)

            olddir = _queuedir(current)
            newdir = _queuedir(name)

            if os.path.exists(newdir):
                raise error.Abort(_('non-queue directory "%s" already exists') %
                        newdir)

            # Rewrite the registry with the new name substituted for the
            # old and move the patch directory alongside.
            fh = repo.vfs('patches.queues.new', 'w')
            for queue in existing:
                if queue == current:
                    fh.write('%s\n' % (name,))
                    if os.path.exists(olddir):
                        util.rename(olddir, newdir)
                else:
                    fh.write('%s\n' % (queue,))
            fh.close()
            util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
            _setactivenocheck(name)
        elif opts.get('delete'):
            _delete(name)
        elif opts.get('purge'):
            # --purge also removes the patch directory itself.
            if name in existing:
                _delete(name)
            qdir = _queuedir(name)
            if os.path.exists(qdir):
                shutil.rmtree(qdir)
        else:
            if name not in existing:
                raise error.Abort(_('use --create to create a new queue'))
            _setactive(name)
3405 3406
def mqphasedefaults(repo, roots):
    """callback used to set mq changeset as secret when no phase data exists"""
    mq = repo.mq
    if mq.applied:
        # Applied patches default to secret when [mq] secret is set,
        # draft otherwise.
        if repo.ui.configbool('mq', 'secret', False):
            targetphase = phases.secret
        else:
            targetphase = phases.draft
        qbase = repo[mq.applied[0].node]
        roots[targetphase].add(qbase.node())
    return roots
3416 3417
def reposetup(ui, repo):
    # Subclass the local repo so mq state is available as repo.mq and
    # commit/push refuse to touch applied patches.
    class mqrepo(repo.__class__):
        @localrepo.unfilteredpropertycache
        def mq(self):
            # Lazily instantiated queue object, cached on the
            # unfiltered repo.
            return queue(self.ui, self.baseui, self.path)

        def invalidateall(self):
            super(mqrepo, self).invalidateall()
            if localrepo.hasunfilteredcache(self, 'mq'):
                # recreate mq in case queue path was changed
                delattr(self.unfiltered(), 'mq')

        def abortifwdirpatched(self, errmsg, force=False):
            # Raise errmsg when a working-directory parent is an
            # applied mq patch (unless forced or checking is disabled).
            if self.mq.applied and self.mq.checkapplied and not force:
                parents = self.dirstate.parents()
                patches = [s.node for s in self.mq.applied]
                if parents[0] in patches or parents[1] in patches:
                    raise error.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            self.abortifwdirpatched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def checkpush(self, pushop):
            # Refuse to push non-secret applied patches.
            if self.mq.applied and self.mq.checkapplied and not pushop.force:
                outapplied = [e.node for e in self.mq.applied]
                if pushop.revs:
                    # Assume applied patches have no non-patch descendants and
                    # are not on remote already. Filtering any changeset not
                    # pushed.
                    heads = set(pushop.revs)
                    for node in reversed(outapplied):
                        if node in heads:
                            break
                        else:
                            outapplied.pop()
                # looking for pushed and shared changeset
                for node in outapplied:
                    if self[node].phase() < phases.secret:
                        raise error.Abort(_('source has mq patches applied'))
                # no non-secret patches pushed
            super(mqrepo, self).checkpush(pushop)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(patch.node, patch.name) for patch in q.applied]

            try:
                # for now ignore filtering business
                self.unfiltered().changelog.rev(mqtags[-1][0])
            except error.LookupError:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            # do not add fake tags for filtered revisions
            included = self.changelog.hasnode
            mqtags = [mqt for mqt in mqtags if included(mqt[0])]
            if not mqtags:
                return result

            # Synthesize qtip/qbase/qparent tags; real tags of the same
            # name win and trigger a warning.
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for patch in mqtags:
                if patch[1] in tags:
                    self.ui.warn(_('tag %s overrides mq patch of the same '
                                   'name\n') % patch[1])
                else:
                    tags[patch[1]] = patch[0]

            return result

    if repo.local():
        repo.__class__ = mqrepo

        repo._phasedefaults.append(mqphasedefaults)
3506 3507
3507 3508 def mqimport(orig, ui, repo, *args, **kwargs):
3508 3509 if (util.safehasattr(repo, 'abortifwdirpatched')
3509 3510 and not kwargs.get('no_commit', False)):
3510 3511 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3511 3512 kwargs.get('force'))
3512 3513 return orig(ui, repo, *args, **kwargs)
3513 3514
3514 3515 def mqinit(orig, ui, *args, **kwargs):
3515 3516 mq = kwargs.pop('mq', None)
3516 3517
3517 3518 if not mq:
3518 3519 return orig(ui, *args, **kwargs)
3519 3520
3520 3521 if args:
3521 3522 repopath = args[0]
3522 3523 if not hg.islocal(repopath):
3523 3524 raise error.Abort(_('only a local queue repository '
3524 3525 'may be initialized'))
3525 3526 else:
3526 repopath = cmdutil.findrepo(os.getcwd())
3527 repopath = cmdutil.findrepo(pycompat.getcwd())
3527 3528 if not repopath:
3528 3529 raise error.Abort(_('there is no Mercurial repository here '
3529 3530 '(.hg not found)'))
3530 3531 repo = hg.repository(ui, repopath)
3531 3532 return qinit(ui, repo, True)
3532 3533
3533 3534 def mqcommand(orig, ui, repo, *args, **kwargs):
3534 3535 """Add --mq option to operate on patch repository instead of main"""
3535 3536
3536 3537 # some commands do not like getting unknown options
3537 3538 mq = kwargs.pop('mq', None)
3538 3539
3539 3540 if not mq:
3540 3541 return orig(ui, repo, *args, **kwargs)
3541 3542
3542 3543 q = repo.mq
3543 3544 r = q.qrepo()
3544 3545 if not r:
3545 3546 raise error.Abort(_('no queue repository'))
3546 3547 return orig(r.ui, r, *args, **kwargs)
3547 3548
3548 3549 def summaryhook(ui, repo):
3549 3550 q = repo.mq
3550 3551 m = []
3551 3552 a, u = len(q.applied), len(q.unapplied(repo))
3552 3553 if a:
3553 3554 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3554 3555 if u:
3555 3556 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3556 3557 if m:
3557 3558 # i18n: column positioning for "hg summary"
3558 3559 ui.write(_("mq: %s\n") % ', '.join(m))
3559 3560 else:
3560 3561 # i18n: column positioning for "hg summary"
3561 3562 ui.note(_("mq: (empty queue)\n"))
3562 3563
3563 3564 revsetpredicate = registrar.revsetpredicate()
3564 3565
3565 3566 @revsetpredicate('mq()')
3566 3567 def revsetmq(repo, subset, x):
3567 3568 """Changesets managed by MQ.
3568 3569 """
3569 3570 revset.getargs(x, 0, 0, _("mq takes no arguments"))
3570 3571 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3571 3572 return revset.baseset([r for r in subset if r in applied])
3572 3573
3573 3574 # tell hggettext to extract docstrings from these functions:
3574 3575 i18nfunctions = [revsetmq]
3575 3576
3576 3577 def extsetup(ui):
3577 3578 # Ensure mq wrappers are called first, regardless of extension load order by
3578 3579 # NOT wrapping in uisetup() and instead deferring to init stage two here.
3579 3580 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3580 3581
3581 3582 extensions.wrapcommand(commands.table, 'import', mqimport)
3582 3583 cmdutil.summaryhooks.add('mq', summaryhook)
3583 3584
3584 3585 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3585 3586 entry[1].extend(mqopt)
3586 3587
3587 3588 def dotable(cmdtable):
3588 3589 for cmd, entry in cmdtable.iteritems():
3589 3590 cmd = cmdutil.parsealiases(cmd)[0]
3590 3591 func = entry[0]
3591 3592 if func.norepo:
3592 3593 continue
3593 3594 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3594 3595 entry[1].extend(mqopt)
3595 3596
3596 3597 dotable(commands.table)
3597 3598
3598 3599 for extname, extmodule in extensions.extensions():
3599 3600 if extmodule.__file__ != __file__:
3600 3601 dotable(getattr(extmodule, 'cmdtable', {}))
3601 3602
3602 3603 colortable = {'qguard.negative': 'red',
3603 3604 'qguard.positive': 'yellow',
3604 3605 'qguard.unguarded': 'green',
3605 3606 'qseries.applied': 'blue bold underline',
3606 3607 'qseries.guarded': 'black bold',
3607 3608 'qseries.missing': 'red bold',
3608 3609 'qseries.unapplied': 'black bold'}
@@ -1,553 +1,554
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import os
17 17 import shutil
18 18 import tempfile
19 19
20 20 from .i18n import _
21 21 from .node import nullid
22 22
23 23 from . import (
24 24 bundle2,
25 25 changegroup,
26 26 changelog,
27 27 cmdutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 filelog,
32 32 localrepo,
33 33 manifest,
34 34 mdiff,
35 35 node as nodemod,
36 36 pathutil,
37 37 phases,
38 pycompat,
38 39 revlog,
39 40 scmutil,
40 41 util,
41 42 )
42 43
43 44 class bundlerevlog(revlog.revlog):
44 45 def __init__(self, opener, indexfile, bundle, linkmapper):
45 46 # How it works:
46 47 # To retrieve a revision, we need to know the offset of the revision in
47 48 # the bundle (an unbundle object). We store this offset in the index
48 49 # (start). The base of the delta is stored in the base field.
49 50 #
50 51 # To differentiate a rev in the bundle from a rev in the revlog, we
51 52 # check revision against repotiprev.
52 53 opener = scmutil.readonlyvfs(opener)
53 54 revlog.revlog.__init__(self, opener, indexfile)
54 55 self.bundle = bundle
55 56 n = len(self)
56 57 self.repotiprev = n - 1
57 58 chain = None
58 59 self.bundlerevs = set() # used by 'bundle()' revset expression
59 60 getchunk = lambda: bundle.deltachunk(chain)
60 61 for chunkdata in iter(getchunk, {}):
61 62 node = chunkdata['node']
62 63 p1 = chunkdata['p1']
63 64 p2 = chunkdata['p2']
64 65 cs = chunkdata['cs']
65 66 deltabase = chunkdata['deltabase']
66 67 delta = chunkdata['delta']
67 68
68 69 size = len(delta)
69 70 start = bundle.tell() - size
70 71
71 72 link = linkmapper(cs)
72 73 if node in self.nodemap:
73 74 # this can happen if two branches make the same change
74 75 chain = node
75 76 self.bundlerevs.add(self.nodemap[node])
76 77 continue
77 78
78 79 for p in (p1, p2):
79 80 if p not in self.nodemap:
80 81 raise error.LookupError(p, self.indexfile,
81 82 _("unknown parent"))
82 83
83 84 if deltabase not in self.nodemap:
84 85 raise LookupError(deltabase, self.indexfile,
85 86 _('unknown delta base'))
86 87
87 88 baserev = self.rev(deltabase)
88 89 # start, size, full unc. size, base (unused), link, p1, p2, node
89 90 e = (revlog.offset_type(start, 0), size, -1, baserev, link,
90 91 self.rev(p1), self.rev(p2), node)
91 92 self.index.insert(-1, e)
92 93 self.nodemap[node] = n
93 94 self.bundlerevs.add(n)
94 95 chain = node
95 96 n += 1
96 97
97 98 def _chunk(self, rev):
98 99 # Warning: in case of bundle, the diff is against what we stored as
99 100 # delta base, not against rev - 1
100 101 # XXX: could use some caching
101 102 if rev <= self.repotiprev:
102 103 return revlog.revlog._chunk(self, rev)
103 104 self.bundle.seek(self.start(rev))
104 105 return self.bundle.read(self.length(rev))
105 106
106 107 def revdiff(self, rev1, rev2):
107 108 """return or calculate a delta between two revisions"""
108 109 if rev1 > self.repotiprev and rev2 > self.repotiprev:
109 110 # hot path for bundle
110 111 revb = self.index[rev2][3]
111 112 if revb == rev1:
112 113 return self._chunk(rev2)
113 114 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
114 115 return revlog.revlog.revdiff(self, rev1, rev2)
115 116
116 117 return mdiff.textdiff(self.revision(self.node(rev1)),
117 118 self.revision(self.node(rev2)))
118 119
119 120 def revision(self, nodeorrev):
120 121 """return an uncompressed revision of a given node or revision
121 122 number.
122 123 """
123 124 if isinstance(nodeorrev, int):
124 125 rev = nodeorrev
125 126 node = self.node(rev)
126 127 else:
127 128 node = nodeorrev
128 129 rev = self.rev(node)
129 130
130 131 if node == nullid:
131 132 return ""
132 133
133 134 text = None
134 135 chain = []
135 136 iterrev = rev
136 137 # reconstruct the revision if it is from a changegroup
137 138 while iterrev > self.repotiprev:
138 139 if self._cache and self._cache[1] == iterrev:
139 140 text = self._cache[2]
140 141 break
141 142 chain.append(iterrev)
142 143 iterrev = self.index[iterrev][3]
143 144 if text is None:
144 145 text = self.baserevision(iterrev)
145 146
146 147 while chain:
147 148 delta = self._chunk(chain.pop())
148 149 text = mdiff.patches(text, [delta])
149 150
150 151 self._checkhash(text, node, rev)
151 152 self._cache = (node, rev, text)
152 153 return text
153 154
154 155 def baserevision(self, nodeorrev):
155 156 # Revlog subclasses may override 'revision' method to modify format of
156 157 # content retrieved from revlog. To use bundlerevlog with such class one
157 158 # needs to override 'baserevision' and make more specific call here.
158 159 return revlog.revlog.revision(self, nodeorrev)
159 160
160 161 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
161 162 raise NotImplementedError
162 163 def addgroup(self, revs, linkmapper, transaction):
163 164 raise NotImplementedError
164 165 def strip(self, rev, minlink):
165 166 raise NotImplementedError
166 167 def checksize(self):
167 168 raise NotImplementedError
168 169
169 170 class bundlechangelog(bundlerevlog, changelog.changelog):
170 171 def __init__(self, opener, bundle):
171 172 changelog.changelog.__init__(self, opener)
172 173 linkmapper = lambda x: x
173 174 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
174 175 linkmapper)
175 176
176 177 def baserevision(self, nodeorrev):
177 178 # Although changelog doesn't override 'revision' method, some extensions
178 179 # may replace this class with another that does. Same story with
179 180 # manifest and filelog classes.
180 181
181 182 # This bypasses filtering on changelog.node() and rev() because we need
182 183 # revision text of the bundle base even if it is hidden.
183 184 oldfilter = self.filteredrevs
184 185 try:
185 186 self.filteredrevs = ()
186 187 return changelog.changelog.revision(self, nodeorrev)
187 188 finally:
188 189 self.filteredrevs = oldfilter
189 190
190 191 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
191 192 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
192 193 manifest.manifestrevlog.__init__(self, opener, dir=dir)
193 194 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
194 195 linkmapper)
195 196 if dirlogstarts is None:
196 197 dirlogstarts = {}
197 198 if self.bundle.version == "03":
198 199 dirlogstarts = _getfilestarts(self.bundle)
199 200 self._dirlogstarts = dirlogstarts
200 201 self._linkmapper = linkmapper
201 202
202 203 def baserevision(self, nodeorrev):
203 204 node = nodeorrev
204 205 if isinstance(node, int):
205 206 node = self.node(node)
206 207
207 208 if node in self.fulltextcache:
208 209 result = self.fulltextcache[node].tostring()
209 210 else:
210 211 result = manifest.manifestrevlog.revision(self, nodeorrev)
211 212 return result
212 213
213 214 def dirlog(self, d):
214 215 if d in self._dirlogstarts:
215 216 self.bundle.seek(self._dirlogstarts[d])
216 217 return bundlemanifest(
217 218 self.opener, self.bundle, self._linkmapper,
218 219 self._dirlogstarts, dir=d)
219 220 return super(bundlemanifest, self).dirlog(d)
220 221
221 222 class bundlefilelog(bundlerevlog, filelog.filelog):
222 223 def __init__(self, opener, path, bundle, linkmapper):
223 224 filelog.filelog.__init__(self, opener, path)
224 225 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
225 226 linkmapper)
226 227
227 228 def baserevision(self, nodeorrev):
228 229 return filelog.filelog.revision(self, nodeorrev)
229 230
230 231 class bundlepeer(localrepo.localpeer):
231 232 def canpush(self):
232 233 return False
233 234
234 235 class bundlephasecache(phases.phasecache):
235 236 def __init__(self, *args, **kwargs):
236 237 super(bundlephasecache, self).__init__(*args, **kwargs)
237 238 if util.safehasattr(self, 'opener'):
238 239 self.opener = scmutil.readonlyvfs(self.opener)
239 240
240 241 def write(self):
241 242 raise NotImplementedError
242 243
243 244 def _write(self, fp):
244 245 raise NotImplementedError
245 246
246 247 def _updateroots(self, phase, newroots, tr):
247 248 self.phaseroots[phase] = newroots
248 249 self.invalidate()
249 250 self.dirty = True
250 251
251 252 def _getfilestarts(bundle):
252 253 bundlefilespos = {}
253 254 for chunkdata in iter(bundle.filelogheader, {}):
254 255 fname = chunkdata['filename']
255 256 bundlefilespos[fname] = bundle.tell()
256 257 for chunk in iter(lambda: bundle.deltachunk(None), {}):
257 258 pass
258 259 return bundlefilespos
259 260
260 261 class bundlerepository(localrepo.localrepository):
261 262 def __init__(self, ui, path, bundlename):
262 263 def _writetempbundle(read, suffix, header=''):
263 264 """Write a temporary file to disk
264 265
265 266 This is closure because we need to make sure this tracked by
266 267 self.tempfile for cleanup purposes."""
267 268 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
268 269 suffix=".hg10un")
269 270 self.tempfile = temp
270 271
271 272 with os.fdopen(fdtemp, 'wb') as fptemp:
272 273 fptemp.write(header)
273 274 while True:
274 275 chunk = read(2**18)
275 276 if not chunk:
276 277 break
277 278 fptemp.write(chunk)
278 279
279 280 return self.vfs.open(self.tempfile, mode="rb")
280 281 self._tempparent = None
281 282 try:
282 283 localrepo.localrepository.__init__(self, ui, path)
283 284 except error.RepoError:
284 285 self._tempparent = tempfile.mkdtemp()
285 286 localrepo.instance(ui, self._tempparent, 1)
286 287 localrepo.localrepository.__init__(self, ui, self._tempparent)
287 288 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
288 289
289 290 if path:
290 291 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
291 292 else:
292 293 self._url = 'bundle:' + bundlename
293 294
294 295 self.tempfile = None
295 296 f = util.posixfile(bundlename, "rb")
296 297 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
297 298
298 299 if isinstance(self.bundle, bundle2.unbundle20):
299 300 cgstream = None
300 301 for part in self.bundle.iterparts():
301 302 if part.type == 'changegroup':
302 303 if cgstream is not None:
303 304 raise NotImplementedError("can't process "
304 305 "multiple changegroups")
305 306 cgstream = part
306 307 version = part.params.get('version', '01')
307 308 legalcgvers = changegroup.supportedincomingversions(self)
308 309 if version not in legalcgvers:
309 310 msg = _('Unsupported changegroup version: %s')
310 311 raise error.Abort(msg % version)
311 312 if self.bundle.compressed():
312 313 cgstream = _writetempbundle(part.read,
313 314 ".cg%sun" % version)
314 315
315 316 if cgstream is None:
316 317 raise error.Abort(_('No changegroups found'))
317 318 cgstream.seek(0)
318 319
319 320 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
320 321
321 322 elif self.bundle.compressed():
322 323 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
323 324 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
324 325 bundlename,
325 326 self.vfs)
326 327
327 328 # dict with the mapping 'filename' -> position in the bundle
328 329 self.bundlefilespos = {}
329 330
330 331 self.firstnewrev = self.changelog.repotiprev + 1
331 332 phases.retractboundary(self, None, phases.draft,
332 333 [ctx.node() for ctx in self[self.firstnewrev:]])
333 334
334 335 @localrepo.unfilteredpropertycache
335 336 def _phasecache(self):
336 337 return bundlephasecache(self, self._phasedefaults)
337 338
338 339 @localrepo.unfilteredpropertycache
339 340 def changelog(self):
340 341 # consume the header if it exists
341 342 self.bundle.changelogheader()
342 343 c = bundlechangelog(self.svfs, self.bundle)
343 344 self.manstart = self.bundle.tell()
344 345 return c
345 346
346 347 def _constructmanifest(self):
347 348 self.bundle.seek(self.manstart)
348 349 # consume the header if it exists
349 350 self.bundle.manifestheader()
350 351 linkmapper = self.unfiltered().changelog.rev
351 352 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
352 353 self.filestart = self.bundle.tell()
353 354 return m
354 355
355 356 @localrepo.unfilteredpropertycache
356 357 def manstart(self):
357 358 self.changelog
358 359 return self.manstart
359 360
360 361 @localrepo.unfilteredpropertycache
361 362 def filestart(self):
362 363 self.manifestlog
363 364 return self.filestart
364 365
365 366 def url(self):
366 367 return self._url
367 368
368 369 def file(self, f):
369 370 if not self.bundlefilespos:
370 371 self.bundle.seek(self.filestart)
371 372 self.bundlefilespos = _getfilestarts(self.bundle)
372 373
373 374 if f in self.bundlefilespos:
374 375 self.bundle.seek(self.bundlefilespos[f])
375 376 linkmapper = self.unfiltered().changelog.rev
376 377 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
377 378 else:
378 379 return filelog.filelog(self.svfs, f)
379 380
380 381 def close(self):
381 382 """Close assigned bundle file immediately."""
382 383 self.bundlefile.close()
383 384 if self.tempfile is not None:
384 385 self.vfs.unlink(self.tempfile)
385 386 if self._tempparent:
386 387 shutil.rmtree(self._tempparent, True)
387 388
388 389 def cancopy(self):
389 390 return False
390 391
391 392 def peer(self):
392 393 return bundlepeer(self)
393 394
394 395 def getcwd(self):
395 return os.getcwd() # always outside the repo
396 return pycompat.getcwd() # always outside the repo
396 397
397 398 # Check if parents exist in localrepo before setting
398 399 def setparents(self, p1, p2=nullid):
399 400 p1rev = self.changelog.rev(p1)
400 401 p2rev = self.changelog.rev(p2)
401 402 msg = _("setting parent to node %s that only exists in the bundle\n")
402 403 if self.changelog.repotiprev < p1rev:
403 404 self.ui.warn(msg % nodemod.hex(p1))
404 405 if self.changelog.repotiprev < p2rev:
405 406 self.ui.warn(msg % nodemod.hex(p2))
406 407 return super(bundlerepository, self).setparents(p1, p2)
407 408
408 409 def instance(ui, path, create):
409 410 if create:
410 411 raise error.Abort(_('cannot create new bundle repository'))
411 412 # internal config: bundle.mainreporoot
412 413 parentpath = ui.config("bundle", "mainreporoot", "")
413 414 if not parentpath:
414 415 # try to find the correct path to the working directory repo
415 parentpath = cmdutil.findrepo(os.getcwd())
416 parentpath = cmdutil.findrepo(pycompat.getcwd())
416 417 if parentpath is None:
417 418 parentpath = ''
418 419 if parentpath:
419 420 # Try to make the full path relative so we get a nice, short URL.
420 421 # In particular, we don't want temp dir names in test outputs.
421 cwd = os.getcwd()
422 cwd = pycompat.getcwd()
422 423 if parentpath == cwd:
423 424 parentpath = ''
424 425 else:
425 426 cwd = pathutil.normasprefix(cwd)
426 427 if parentpath.startswith(cwd):
427 428 parentpath = parentpath[len(cwd):]
428 429 u = util.url(path)
429 430 path = u.localpath()
430 431 if u.scheme == 'bundle':
431 432 s = path.split("+", 1)
432 433 if len(s) == 1:
433 434 repopath, bundlename = parentpath, s[0]
434 435 else:
435 436 repopath, bundlename = s
436 437 else:
437 438 repopath, bundlename = parentpath, path
438 439 return bundlerepository(ui, repopath, bundlename)
439 440
440 441 class bundletransactionmanager(object):
441 442 def transaction(self):
442 443 return None
443 444
444 445 def close(self):
445 446 raise NotImplementedError
446 447
447 448 def release(self):
448 449 raise NotImplementedError
449 450
450 451 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
451 452 force=False):
452 453 '''obtains a bundle of changes incoming from other
453 454
454 455 "onlyheads" restricts the returned changes to those reachable from the
455 456 specified heads.
456 457 "bundlename", if given, stores the bundle to this file path permanently;
457 458 otherwise it's stored to a temp file and gets deleted again when you call
458 459 the returned "cleanupfn".
459 460 "force" indicates whether to proceed on unrelated repos.
460 461
461 462 Returns a tuple (local, csets, cleanupfn):
462 463
463 464 "local" is a local repo from which to obtain the actual incoming
464 465 changesets; it is a bundlerepo for the obtained bundle when the
465 466 original "other" is remote.
466 467 "csets" lists the incoming changeset node ids.
467 468 "cleanupfn" must be called without arguments when you're done processing
468 469 the changes; it closes both the original "other" and the one returned
469 470 here.
470 471 '''
471 472 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
472 473 force=force)
473 474 common, incoming, rheads = tmp
474 475 if not incoming:
475 476 try:
476 477 if bundlename:
477 478 os.unlink(bundlename)
478 479 except OSError:
479 480 pass
480 481 return repo, [], other.close
481 482
482 483 commonset = set(common)
483 484 rheads = [x for x in rheads if x not in commonset]
484 485
485 486 bundle = None
486 487 bundlerepo = None
487 488 localrepo = other.local()
488 489 if bundlename or not localrepo:
489 490 # create a bundle (uncompressed if other repo is not local)
490 491
491 492 # developer config: devel.legacy.exchange
492 493 legexc = ui.configlist('devel', 'legacy.exchange')
493 494 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
494 495 canbundle2 = (not forcebundle1
495 496 and other.capable('getbundle')
496 497 and other.capable('bundle2'))
497 498 if canbundle2:
498 499 kwargs = {}
499 500 kwargs['common'] = common
500 501 kwargs['heads'] = rheads
501 502 kwargs['bundlecaps'] = exchange.caps20to10(repo)
502 503 kwargs['cg'] = True
503 504 b2 = other.getbundle('incoming', **kwargs)
504 505 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
505 506 bundlename)
506 507 else:
507 508 if other.capable('getbundle'):
508 509 cg = other.getbundle('incoming', common=common, heads=rheads)
509 510 elif onlyheads is None and not other.capable('changegroupsubset'):
510 511 # compat with older servers when pulling all remote heads
511 512 cg = other.changegroup(incoming, "incoming")
512 513 rheads = None
513 514 else:
514 515 cg = other.changegroupsubset(incoming, rheads, 'incoming')
515 516 if localrepo:
516 517 bundletype = "HG10BZ"
517 518 else:
518 519 bundletype = "HG10UN"
519 520 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
520 521 bundletype)
521 522 # keep written bundle?
522 523 if bundlename:
523 524 bundle = None
524 525 if not localrepo:
525 526 # use the created uncompressed bundlerepo
526 527 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
527 528 fname)
528 529 # this repo contains local and other now, so filter out local again
529 530 common = repo.heads()
530 531 if localrepo:
531 532 # Part of common may be remotely filtered
532 533 # So use an unfiltered version
533 534 # The discovery process probably need cleanup to avoid that
534 535 localrepo = localrepo.unfiltered()
535 536
536 537 csets = localrepo.changelog.findmissing(common, rheads)
537 538
538 539 if bundlerepo:
539 540 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
540 541 remotephases = other.listkeys('phases')
541 542
542 543 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
543 544 pullop.trmanager = bundletransactionmanager()
544 545 exchange._pullapplyphases(pullop, remotephases)
545 546
546 547 def cleanup():
547 548 if bundlerepo:
548 549 bundlerepo.close()
549 550 if bundle:
550 551 os.unlink(bundle)
551 552 other.close()
552 553
553 554 return (localrepo, csets, cleanup)
@@ -1,3440 +1,3441
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import tempfile
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 )
23 23
24 24 from . import (
25 25 bookmarks,
26 26 changelog,
27 27 copies,
28 28 crecord as crecordmod,
29 29 dirstateguard as dirstateguardmod,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 graphmod,
34 34 lock as lockmod,
35 35 match as matchmod,
36 36 mergeutil,
37 37 obsolete,
38 38 patch,
39 39 pathutil,
40 40 phases,
41 pycompat,
41 42 repair,
42 43 revlog,
43 44 revset,
44 45 scmutil,
45 46 templatekw,
46 47 templater,
47 48 util,
48 49 )
49 50 stringio = util.stringio
50 51
51 52 def ishunk(x):
52 53 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
53 54 return isinstance(x, hunkclasses)
54 55
55 56 def newandmodified(chunks, originalchunks):
56 57 newlyaddedandmodifiedfiles = set()
57 58 for chunk in chunks:
58 59 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
59 60 originalchunks:
60 61 newlyaddedandmodifiedfiles.add(chunk.header.filename())
61 62 return newlyaddedandmodifiedfiles
62 63
63 64 def parsealiases(cmd):
64 65 return cmd.lstrip("^").split("|")
65 66
66 67 def setupwrapcolorwrite(ui):
67 68 # wrap ui.write so diff output can be labeled/colorized
68 69 def wrapwrite(orig, *args, **kw):
69 70 label = kw.pop('label', '')
70 71 for chunk, l in patch.difflabel(lambda: args):
71 72 orig(chunk, label=label + l)
72 73
73 74 oldwrite = ui.write
74 75 def wrap(*args, **kwargs):
75 76 return wrapwrite(oldwrite, *args, **kwargs)
76 77 setattr(ui, 'write', wrap)
77 78 return oldwrite
78 79
79 80 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
80 81 if usecurses:
81 82 if testfile:
82 83 recordfn = crecordmod.testdecorator(testfile,
83 84 crecordmod.testchunkselector)
84 85 else:
85 86 recordfn = crecordmod.chunkselector
86 87
87 88 return crecordmod.filterpatch(ui, originalhunks, recordfn)
88 89
89 90 else:
90 91 return patch.filterpatch(ui, originalhunks, operation)
91 92
92 93 def recordfilter(ui, originalhunks, operation=None):
93 94 """ Prompts the user to filter the originalhunks and return a list of
94 95 selected hunks.
95 96 *operation* is used for to build ui messages to indicate the user what
96 97 kind of filtering they are doing: reverting, committing, shelving, etc.
97 98 (see patch.filterpatch).
98 99 """
99 100 usecurses = crecordmod.checkcurses(ui)
100 101 testfile = ui.config('experimental', 'crecordtest', None)
101 102 oldwrite = setupwrapcolorwrite(ui)
102 103 try:
103 104 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
104 105 testfile, operation)
105 106 finally:
106 107 ui.write = oldwrite
107 108 return newchunks, newopts
108 109
109 110 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
110 111 filterfn, *pats, **opts):
111 112 from . import merge as mergemod
112 113 if not ui.interactive():
113 114 if cmdsuggest:
114 115 msg = _('running non-interactively, use %s instead') % cmdsuggest
115 116 else:
116 117 msg = _('running non-interactively')
117 118 raise error.Abort(msg)
118 119
119 120 # make sure username is set before going interactive
120 121 if not opts.get('user'):
121 122 ui.username() # raise exception, username not provided
122 123
123 124 def recordfunc(ui, repo, message, match, opts):
124 125 """This is generic record driver.
125 126
126 127 Its job is to interactively filter local changes, and
127 128 accordingly prepare working directory into a state in which the
128 129 job can be delegated to a non-interactive commit command such as
129 130 'commit' or 'qrefresh'.
130 131
131 132 After the actual job is done by non-interactive command, the
132 133 working directory is restored to its original state.
133 134
134 135 In the end we'll record interesting changes, and everything else
135 136 will be left in place, so the user can continue working.
136 137 """
137 138
138 139 checkunfinished(repo, commit=True)
139 140 wctx = repo[None]
140 141 merge = len(wctx.parents()) > 1
141 142 if merge:
142 143 raise error.Abort(_('cannot partially commit a merge '
143 144 '(use "hg commit" instead)'))
144 145
145 146 def fail(f, msg):
146 147 raise error.Abort('%s: %s' % (f, msg))
147 148
148 149 force = opts.get('force')
149 150 if not force:
150 151 vdirs = []
151 152 match.explicitdir = vdirs.append
152 153 match.bad = fail
153 154
154 155 status = repo.status(match=match)
155 156 if not force:
156 157 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
157 158 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
158 159 diffopts.nodates = True
159 160 diffopts.git = True
160 161 diffopts.showfunc = True
161 162 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
162 163 originalchunks = patch.parsepatch(originaldiff)
163 164
164 165 # 1. filter patch, since we are intending to apply subset of it
165 166 try:
166 167 chunks, newopts = filterfn(ui, originalchunks)
167 168 except patch.PatchError as err:
168 169 raise error.Abort(_('error parsing patch: %s') % err)
169 170 opts.update(newopts)
170 171
171 172 # We need to keep a backup of files that have been newly added and
172 173 # modified during the recording process because there is a previous
173 174 # version without the edit in the workdir
174 175 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
175 176 contenders = set()
176 177 for h in chunks:
177 178 try:
178 179 contenders.update(set(h.files()))
179 180 except AttributeError:
180 181 pass
181 182
182 183 changed = status.modified + status.added + status.removed
183 184 newfiles = [f for f in changed if f in contenders]
184 185 if not newfiles:
185 186 ui.status(_('no changes to record\n'))
186 187 return 0
187 188
188 189 modified = set(status.modified)
189 190
190 191 # 2. backup changed files, so we can restore them in the end
191 192
192 193 if backupall:
193 194 tobackup = changed
194 195 else:
195 196 tobackup = [f for f in newfiles if f in modified or f in \
196 197 newlyaddedandmodifiedfiles]
197 198 backups = {}
198 199 if tobackup:
199 200 backupdir = repo.join('record-backups')
200 201 try:
201 202 os.mkdir(backupdir)
202 203 except OSError as err:
203 204 if err.errno != errno.EEXIST:
204 205 raise
205 206 try:
206 207 # backup continues
207 208 for f in tobackup:
208 209 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
209 210 dir=backupdir)
210 211 os.close(fd)
211 212 ui.debug('backup %r as %r\n' % (f, tmpname))
212 213 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
213 214 backups[f] = tmpname
214 215
215 216 fp = stringio()
216 217 for c in chunks:
217 218 fname = c.filename()
218 219 if fname in backups:
219 220 c.write(fp)
220 221 dopatch = fp.tell()
221 222 fp.seek(0)
222 223
223 224 # 2.5 optionally review / modify patch in text editor
224 225 if opts.get('review', False):
225 226 patchtext = (crecordmod.diffhelptext
226 227 + crecordmod.patchhelptext
227 228 + fp.read())
228 229 reviewedpatch = ui.edit(patchtext, "",
229 230 extra={"suffix": ".diff"})
230 231 fp.truncate(0)
231 232 fp.write(reviewedpatch)
232 233 fp.seek(0)
233 234
234 235 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
235 236 # 3a. apply filtered patch to clean repo (clean)
236 237 if backups:
237 238 # Equivalent to hg.revert
238 239 m = scmutil.matchfiles(repo, backups.keys())
239 240 mergemod.update(repo, repo.dirstate.p1(),
240 241 False, True, matcher=m)
241 242
242 243 # 3b. (apply)
243 244 if dopatch:
244 245 try:
245 246 ui.debug('applying patch\n')
246 247 ui.debug(fp.getvalue())
247 248 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
248 249 except patch.PatchError as err:
249 250 raise error.Abort(str(err))
250 251 del fp
251 252
252 253 # 4. We prepared working directory according to filtered
253 254 # patch. Now is the time to delegate the job to
254 255 # commit/qrefresh or the like!
255 256
256 257 # Make all of the pathnames absolute.
257 258 newfiles = [repo.wjoin(nf) for nf in newfiles]
258 259 return commitfunc(ui, repo, *newfiles, **opts)
259 260 finally:
260 261 # 5. finally restore backed-up files
261 262 try:
262 263 dirstate = repo.dirstate
263 264 for realname, tmpname in backups.iteritems():
264 265 ui.debug('restoring %r to %r\n' % (tmpname, realname))
265 266
266 267 if dirstate[realname] == 'n':
267 268 # without normallookup, restoring timestamp
268 269 # may cause partially committed files
269 270 # to be treated as unmodified
270 271 dirstate.normallookup(realname)
271 272
272 273 # copystat=True here and above are a hack to trick any
273 274 # editors that have f open that we haven't modified them.
274 275 #
275 276 # Also note that this racy as an editor could notice the
276 277 # file's mtime before we've finished writing it.
277 278 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
278 279 os.unlink(tmpname)
279 280 if tobackup:
280 281 os.rmdir(backupdir)
281 282 except OSError:
282 283 pass
283 284
284 285 def recordinwlock(ui, repo, message, match, opts):
285 286 with repo.wlock():
286 287 return recordfunc(ui, repo, message, match, opts)
287 288
288 289 return commit(ui, repo, recordinwlock, pats, opts)
289 290
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    # an exact table key ("log" alias) beats any "^log|history"-style entry
    if cmd in table:
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        else:
            found = None
            if not strict:
                # fall back to unambiguous-prefix matching
                for alias in aliases:
                    if alias.startswith(cmd):
                        found = alias
                        break
        if found is None:
            continue
        isdebug = aliases[0].startswith("debug") or found.startswith("debug")
        if isdebug:
            debugchoice[found] = (aliases, table[entry])
        else:
            choice[found] = (aliases, table[entry])

    # surface debug commands only when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
327 328
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string.

    Raises error.AmbiguousCommand when several commands match and
    error.UnknownCommand when none does.
    """
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # sorted() instead of list.sort(): on Python 3 dict.keys() is a
        # view object with no sort() method
        clist = sorted(choice)
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        # dict.values() is a non-indexable view on Python 3; wrap in list()
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
344 345
def findrepo(p):
    """Walk upward from directory *p* and return the first ancestor
    (or *p* itself) containing a '.hg' directory, or None if the
    filesystem root is reached without finding one."""
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() is a fixed point only at the root: give up
            return None
        p = parent
    return p
352 353
def bailifchanged(repo, merge=True):
    """Abort if the working directory has uncommitted changes.

    With merge=True an in-progress (uncommitted) merge aborts as well.
    Subrepositories are checked recursively.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'))
    # modified, added, removed or deleted files all count as dirty
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for subpath in sorted(ctx.substate):
        ctx.sub(subpath).bailifchanged()
362 363
def logmessage(ui, opts):
    """Return the commit message from the -m/--message or -l/--logfile
    option (None when neither is given); abort if both are present."""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if logfile and not message:
        try:
            if logfile == '-':
                # read the message from standard input
                message = ui.fin.read()
            else:
                # normalize line endings while reading the log file
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, inst.strerror))
    return message
381 382
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a changectx with two parents is a merge
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
398 399
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        # MQ-style hooks always need the forcing editor, even without --edit
        def forcingeditor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forcingeditor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
429 430
def loglimit(opts):
    """Return the log limit from option -l/--limit as an int, or None
    when no (truthy) limit was given; abort on a non-positive value."""
    raw = opts.get('limit')
    if not raw:
        return None
    try:
        limit = int(raw)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
443 444
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output-filename pattern.

    Always supported: '%%' (a literal '%') and '%b' (basename of the
    repo root).  '%H'/'%h'/'%R'/'%r'/'%m' need a node, '%N'/'%n' need
    total/seqno, and '%s'/'%d'/'%p' need a pathname.  An escape whose
    prerequisite argument is missing aborts as an invalid format spec.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: str(repo.changelog.rev(node)),
                'h': lambda: short(node),
                'm': lambda: re.sub('[^\w]', '_', str(desc)),
                'r': lambda: str(repo.changelog.rev(node)).zfill(
                    revwidth or 0),
            })
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # zero-pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        pieces = []
        i = 0
        end = len(pat)
        while i < end:
            ch = pat[i]
            if ch == '%':
                i += 1
                ch = expander[pat[i]]()
            pieces.append(ch)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename")
                          % inst.args[0])
489 490
490 491 class _unclosablefile(object):
491 492 def __init__(self, fp):
492 493 self._fp = fp
493 494
494 495 def close(self):
495 496 pass
496 497
497 498 def __iter__(self):
498 499 return iter(self._fp)
499 500
500 501 def __getattr__(self, attr):
501 502 return getattr(self._fp, attr)
502 503
503 504 def __enter__(self):
504 505 return self
505 506
506 507 def __exit__(self, exc_type, exc_value, exc_tb):
507 508 pass
508 509
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return a file-like object for the given output pattern.

    An empty pattern or '-' maps to stdout (or stdin for read modes),
    wrapped so close() is a no-op.  A file-like object with the
    matching read/write method is passed through unchanged.  Anything
    else is expanded via makefilename() and opened.
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(fp)
    if writable and util.safehasattr(pat, 'write'):
        return pat
    if 'r' in mode and util.safehasattr(pat, 'read'):
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first open truncates; later opens of the same name append
            modemap[fn] = 'ab'
    return open(fn, mode)
531 532
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    Exactly one of the --changelog/--manifest/--dir options, or a
    filename, selects the revlog; conflicting combinations abort.
    As a last resort a bare '.i' revlog file is opened from the
    current directory.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        # pycompat.getcwd() rather than os.getcwd(): on Python 3 it
        # returns bytes, matching the rest of the vfs/opener layer
        r = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
576 577
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) working-directory files and
    record the copies/renames in the dirstate.

    Returns True when at least one individual copy failed, else False.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget (hgsep) -> abssrc, to detect colliding copies
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # Expand one source pattern into (abs, rel, exact) tuples,
        # warning about (and skipping) unmanaged/removed exact matches.
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # Perform a single copy/rename; returns True (only) on failure.
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            # --after only records the operation; the target must exist
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # rename through a temporary name to change only case
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist in dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    # with --after, only recorded (not physical) copies: different mapper
    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
822 823
## facility to let extension process additional data into an import patch
# list of identifier to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# extensions register by appending an identifier to the list above and
# adding the matching callable to the map below; tryimportone() looks
# each identifier up in the map and calls it in list order.
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass an ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
843 844
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, rejects flag)
    tuple; (None, None, False) when the hunk contained no patch data.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    # command-line options win over values parsed from the patch header
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # no patch data was extracted from the hunk
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply to the working directory (no --bypass)
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    # --partial: keep going and commit what applied
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                try:
                    if partial:
                        # a fully-rejected partial patch may commit nothing
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                    for idfunc in extrapostimport:
                        extrapostimportmap[idfunc](repo[n])
                finally:
                    repo.ui.restoreconfig(allowemptyback)
        else:
            # --bypass: build an in-memory commit, leave the wdir alone
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            user,
                                            date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1013 1014
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function as to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
# export() looks each identifier in extraexport up in this map and
# writes the returned string as a '# ...' header line.
extraexportmap = {}
1021 1022
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    One patch per revision in *revs* is written to *fp* when given,
    otherwise to a file named by expanding *template* (see
    makefilename); an empty template writes to the ui.
    '''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])
    filemode = {}  # shared modemap so repeated names append, not truncate

    def single(rev, seqno, fp):
        # write one revision as a patch; only closes fp if it opened it
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            # diff against the second parent instead of the first
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            shouldclose = True
        if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        # extension-provided extra header lines (see extraexport above)
        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1086 1087
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    With stat=True a diffstat is produced instead of a full diff;
    output goes to *fp* when given, otherwise to the ui.  *root*
    restricts the diff to paths below that (repo-relative) root.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat does not need context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1143 1144
1144 1145 class changeset_printer(object):
1145 1146 '''show changeset information when templating not requested.'''
1146 1147
    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        """Set up the printer; with buffered=True, show() collects output
        per revision for flush() to emit later."""
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # rev -> buffered header / changeset text, consumed by flush()
        self.header = {}
        self.hunk = {}
        # last header written, so identical headers are not repeated
        self.lastheader = None
        # optional trailing text written by close()
        self.footer = None
1157 1158
1158 1159 def flush(self, ctx):
1159 1160 rev = ctx.rev()
1160 1161 if rev in self.header:
1161 1162 h = self.header[rev]
1162 1163 if h != self.lastheader:
1163 1164 self.lastheader = h
1164 1165 self.ui.write(h)
1165 1166 del self.header[rev]
1166 1167 if rev in self.hunk:
1167 1168 self.ui.write(self.hunk[rev])
1168 1169 del self.hunk[rev]
1169 1170 return 1
1170 1171 return 0
1171 1172
1172 1173 def close(self):
1173 1174 if self.footer:
1174 1175 self.ui.write(self.footer)
1175 1176
1176 1177 def show(self, ctx, copies=None, matchfn=None, **props):
1177 1178 if self.buffered:
1178 1179 self.ui.pushbuffer(labeled=True)
1179 1180 self._show(ctx, copies, matchfn, props)
1180 1181 self.hunk[ctx.rev()] = self.ui.popbuffer()
1181 1182 else:
1182 1183 self._show(ctx, copies, matchfn, props)
1183 1184
1184 1185 def _show(self, ctx, copies, matchfn, props):
1185 1186 '''show a single changeset or file revision'''
1186 1187 changenode = ctx.node()
1187 1188 rev = ctx.rev()
1188 1189 if self.ui.debugflag:
1189 1190 hexfunc = hex
1190 1191 else:
1191 1192 hexfunc = short
1192 1193 # as of now, wctx.node() and wctx.rev() return None, but we want to
1193 1194 # show the same values as {node} and {rev} templatekw
1194 1195 revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))
1195 1196
1196 1197 if self.ui.quiet:
1197 1198 self.ui.write("%d:%s\n" % revnode, label='log.node')
1198 1199 return
1199 1200
1200 1201 date = util.datestr(ctx.date())
1201 1202
1202 1203 # i18n: column positioning for "hg log"
1203 1204 self.ui.write(_("changeset: %d:%s\n") % revnode,
1204 1205 label='log.changeset changeset.%s' % ctx.phasestr())
1205 1206
1206 1207 # branches are shown first before any other names due to backwards
1207 1208 # compatibility
1208 1209 branch = ctx.branch()
1209 1210 # don't show the default branch name
1210 1211 if branch != 'default':
1211 1212 # i18n: column positioning for "hg log"
1212 1213 self.ui.write(_("branch: %s\n") % branch,
1213 1214 label='log.branch')
1214 1215
1215 1216 for nsname, ns in self.repo.names.iteritems():
1216 1217 # branches has special logic already handled above, so here we just
1217 1218 # skip it
1218 1219 if nsname == 'branches':
1219 1220 continue
1220 1221 # we will use the templatename as the color name since those two
1221 1222 # should be the same
1222 1223 for name in ns.names(self.repo, changenode):
1223 1224 self.ui.write(ns.logfmt % name,
1224 1225 label='log.%s' % ns.colorname)
1225 1226 if self.ui.debugflag:
1226 1227 # i18n: column positioning for "hg log"
1227 1228 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1228 1229 label='log.phase')
1229 1230 for pctx in scmutil.meaningfulparents(self.repo, ctx):
1230 1231 label = 'log.parent changeset.%s' % pctx.phasestr()
1231 1232 # i18n: column positioning for "hg log"
1232 1233 self.ui.write(_("parent: %d:%s\n")
1233 1234 % (pctx.rev(), hexfunc(pctx.node())),
1234 1235 label=label)
1235 1236
1236 1237 if self.ui.debugflag and rev is not None:
1237 1238 mnode = ctx.manifestnode()
1238 1239 # i18n: column positioning for "hg log"
1239 1240 self.ui.write(_("manifest: %d:%s\n") %
1240 1241 (self.repo.manifestlog._revlog.rev(mnode),
1241 1242 hex(mnode)),
1242 1243 label='ui.debug log.manifest')
1243 1244 # i18n: column positioning for "hg log"
1244 1245 self.ui.write(_("user: %s\n") % ctx.user(),
1245 1246 label='log.user')
1246 1247 # i18n: column positioning for "hg log"
1247 1248 self.ui.write(_("date: %s\n") % date,
1248 1249 label='log.date')
1249 1250
1250 1251 if self.ui.debugflag:
1251 1252 files = ctx.p1().status(ctx)[:3]
1252 1253 for key, value in zip([# i18n: column positioning for "hg log"
1253 1254 _("files:"),
1254 1255 # i18n: column positioning for "hg log"
1255 1256 _("files+:"),
1256 1257 # i18n: column positioning for "hg log"
1257 1258 _("files-:")], files):
1258 1259 if value:
1259 1260 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1260 1261 label='ui.debug log.files')
1261 1262 elif ctx.files() and self.ui.verbose:
1262 1263 # i18n: column positioning for "hg log"
1263 1264 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1264 1265 label='ui.note log.files')
1265 1266 if copies and self.ui.verbose:
1266 1267 copies = ['%s (%s)' % c for c in copies]
1267 1268 # i18n: column positioning for "hg log"
1268 1269 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1269 1270 label='ui.note log.copies')
1270 1271
1271 1272 extra = ctx.extra()
1272 1273 if extra and self.ui.debugflag:
1273 1274 for key, value in sorted(extra.items()):
1274 1275 # i18n: column positioning for "hg log"
1275 1276 self.ui.write(_("extra: %s=%s\n")
1276 1277 % (key, value.encode('string_escape')),
1277 1278 label='ui.debug log.extra')
1278 1279
1279 1280 description = ctx.description().strip()
1280 1281 if description:
1281 1282 if self.ui.verbose:
1282 1283 self.ui.write(_("description:\n"),
1283 1284 label='ui.note log.description')
1284 1285 self.ui.write(description,
1285 1286 label='ui.note log.description')
1286 1287 self.ui.write("\n\n")
1287 1288 else:
1288 1289 # i18n: column positioning for "hg log"
1289 1290 self.ui.write(_("summary: %s\n") %
1290 1291 description.splitlines()[0],
1291 1292 label='log.summary')
1292 1293 self.ui.write("\n")
1293 1294
1294 1295 self.showpatch(ctx, matchfn)
1295 1296
1296 1297 def showpatch(self, ctx, matchfn):
1297 1298 if not matchfn:
1298 1299 matchfn = self.matchfn
1299 1300 if matchfn:
1300 1301 stat = self.diffopts.get('stat')
1301 1302 diff = self.diffopts.get('patch')
1302 1303 diffopts = patch.diffallopts(self.ui, self.diffopts)
1303 1304 node = ctx.node()
1304 1305 prev = ctx.p1().node()
1305 1306 if stat:
1306 1307 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1307 1308 match=matchfn, stat=True)
1308 1309 if diff:
1309 1310 if stat:
1310 1311 self.ui.write("\n")
1311 1312 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1312 1313 match=matchfn, stat=False)
1313 1314 self.ui.write("\n")
1314 1315
class jsonchangeset(changeset_printer):
    '''format changeset information as a JSON list.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # True until the first changeset is written; controls the opening
        # "[" versus the separating "," of the JSON list
        self._first = True

    def close(self):
        """Terminate the JSON document (close the list, or emit [])."""
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            # the working-directory context has no rev/node
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write(('\n  "rev": %s') % jrev)
            self.ui.write((',\n  "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n  "rev": %s') % jrev)
        self.ui.write((',\n  "node": %s') % jnode)
        self.ui.write((',\n  "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n  "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n  "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n  "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n  "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n  "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n  "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n  "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n  "manifest": %s') % jmanifestnode)

            self.ui.write((',\n  "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # status against first parent: (modified, added, removed, ...)
            files = ctx.p1().status(ctx)
            self.ui.write((',\n  "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n  "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n  "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write((',\n  "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n  "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n  "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n  "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1413 1414
class changeset_templater(changeset_printer):
    '''format changeset information using a user-supplied template.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # Show full node hashes in debug mode, 12-char short hashes
        # otherwise. An explicit conditional replaces the old
        # "cond and a or b" ternary idiom, which silently picks "b"
        # whenever "a" is falsy and is discouraged in favor of
        # conditional expressions.
        if ui.debugflag:
            formatnode = lambda x: x
        else:
            formatnode = lambda x: x[:12]
        filters = {'formatnode': formatnode}
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        # callers must supply either a template string or a map file, not both
        assert not (tmpl and mapfile)
        if mapfile:
            self.t = templater.templater.frommapfile(mapfile, filters=filters,
                                                     cache=defaulttempl)
        else:
            self.t = formatter.maketemplater(ui, 'changeset', tmpl,
                                             filters=filters,
                                             cache=defaulttempl)

        self.cache = {}

        # find correct templates for current mode; later entries override
        # earlier ones, so e.g. "changeset_debug" beats "changeset_verbose"
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        """Append the document footer template before the base close()."""
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # write header; when buffered, stash it per-rev so flush() can
        # deduplicate identical consecutive headers
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts['changeset']
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1498 1499
def gettemplate(ui, tmpl, style):
    """Resolve a template spec or style name to (template, mapfile).

    At most one element of the returned pair is non-None: either the
    template text to render with, or the path of a style map file.
    """
    # Nothing given explicitly: fall back to ui configuration, where a
    # logtemplate takes precedence over a style.
    if not tmpl and not style:
        configured = ui.config('ui', 'logtemplate')
        if configured:
            return templater.unquotestring(configured), None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if tmpl:
        return formatter.lookuptemplate(ui, 'changeset', tmpl)

    if style:
        # A bare name (no directory part) refers to a bundled
        # "map-cmdline.<name>" style or a file on the template path;
        # anything with a directory component is used as-is.
        mapfile = style
        if not os.path.split(mapfile)[0]:
            located = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if located:
                mapfile = located
        return None, mapfile

    return None, None
1525 1526
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # only build a match-everything matcher when a diff or diffstat
    # will actually be rendered
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)
    else:
        matchfn = None

    # "json" is handled by a dedicated printer, not a real template
    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
    if tmpl or mapfile:
        return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                   buffered)
    return changeset_printer(ui, repo, matchfn, opts, buffered)
1551 1552
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('precnode', '%s ', hex(marker.precnode()))
    # successors may be empty (pruned changeset); only written when present
    succs = marker.succnodes()
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # copy before mutating: don't drop 'date' from the marker's own metadata
    meta = marker.metadata().copy()
    meta.pop('date', None)  # the date was already written as its own field
    fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1572 1573
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matched = {}

    def prep(ctx, fns):
        ctxdate = ctx.date()
        if datematch(ctxdate[0]):
            matched[ctx.rev()] = ctxdate

    allfiles = scmutil.matchall(repo)
    for ctx in walkchangerevs(repo, allfiles, {'rev': None}, prep):
        rev = ctx.rev()
        if rev not in matched:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, util.datestr(matched[rev])))
        return str(rev)

    raise error.Abort(_("revision matching date not found"))
1593 1594
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield an infinite stream of window sizes.

    Sizes start at ``windowsize`` and double on each step until they
    reach ``sizelimit``, after which the limit value repeats forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size = size * 2
1599 1600
class FileWalkError(Exception):
    """Raised when file history cannot be walked via filelogs alone."""
    pass
1602 1603
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []  # (filename, node) pairs discovered via renames when following
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None); rename sources found during
        # the walk are appended to "copies" and picked up here afterwards
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1699 1700
class _followfilter(object):
    """Incrementally decide whether revisions belong to the follow set.

    The first rev passed to match() becomes the start; later revs match
    when they are descendants (if walking forward) or ancestors (if
    walking backward) of it. State in self.roots tracks the frontier.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev  # nullrev means "not yet initialized"
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # restrict to first parents for --follow-first
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # first call: adopt this rev as the starting point
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1737 1738
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # patterns/directories and --removed cannot be resolved via filelogs
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # "wanted" is a smartset here; "-" removes the pruned revs
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # prepare() is called in forward (sorted) order, but revisions
            # are yielded in the caller's requested order below
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1875 1876
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating revs to file names (which is not "correct" but
    # good enough).
    fcache = {}  # rev -> set of file paths relevant at that rev
    # one-element list: a mutable flag the closure below can rebind
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        # walk each followed file's ancestry once, recording which path
        # the file had at every revision it was touched in
        for fn in files:
            fctx = pctx[fn]
            fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
            for c in fctx.ancestors(followfirst=followfirst):
                fcache.setdefault(c.rev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
1903 1904
1904 1905 def _makenofollowlogfilematcher(repo, pats, opts):
1905 1906 '''hook for extensions to override the filematcher for non-follow cases'''
1906 1907 return None
1907 1908
1908 1909 def _makelogrevset(repo, pats, opts, revs):
1909 1910 """Return (expr, filematcher) where expr is a revset string built
1910 1911 from log options and file patterns or None. If --stat or --patch
1911 1912 are not passed filematcher is None. Otherwise it is a callable
1912 1913 taking a revision number and returning a match objects filtering
1913 1914 the files to be detailed when displaying the revision.
1914 1915 """
1915 1916 opt2revset = {
1916 1917 'no_merges': ('not merge()', None),
1917 1918 'only_merges': ('merge()', None),
1918 1919 '_ancestors': ('ancestors(%(val)s)', None),
1919 1920 '_fancestors': ('_firstancestors(%(val)s)', None),
1920 1921 '_descendants': ('descendants(%(val)s)', None),
1921 1922 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1922 1923 '_matchfiles': ('_matchfiles(%(val)s)', None),
1923 1924 'date': ('date(%(val)r)', None),
1924 1925 'branch': ('branch(%(val)r)', ' or '),
1925 1926 '_patslog': ('filelog(%(val)r)', ' or '),
1926 1927 '_patsfollow': ('follow(%(val)r)', ' or '),
1927 1928 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1928 1929 'keyword': ('keyword(%(val)r)', ' or '),
1929 1930 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1930 1931 'user': ('user(%(val)r)', ' or '),
1931 1932 }
1932 1933
1933 1934 opts = dict(opts)
1934 1935 # follow or not follow?
1935 1936 follow = opts.get('follow') or opts.get('follow_first')
1936 1937 if opts.get('follow_first'):
1937 1938 followfirst = 1
1938 1939 else:
1939 1940 followfirst = 0
1940 1941 # --follow with FILE behavior depends on revs...
1941 1942 it = iter(revs)
1942 1943 startrev = next(it)
1943 1944 followdescendants = startrev < next(it, startrev)
1944 1945
1945 1946 # branch and only_branch are really aliases and must be handled at
1946 1947 # the same time
1947 1948 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1948 1949 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1949 1950 # pats/include/exclude are passed to match.match() directly in
1950 1951 # _matchfiles() revset but walkchangerevs() builds its matcher with
1951 1952 # scmutil.match(). The difference is input pats are globbed on
1952 1953 # platforms without shell expansion (windows).
1953 1954 wctx = repo[None]
1954 1955 match, pats = scmutil.matchandpats(wctx, pats, opts)
1955 1956 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1956 1957 opts.get('removed'))
1957 1958 if not slowpath:
1958 1959 for f in match.files():
1959 1960 if follow and f not in wctx:
1960 1961 # If the file exists, it may be a directory, so let it
1961 1962 # take the slow path.
1962 1963 if os.path.exists(repo.wjoin(f)):
1963 1964 slowpath = True
1964 1965 continue
1965 1966 else:
1966 1967 raise error.Abort(_('cannot follow file not in parent '
1967 1968 'revision: "%s"') % f)
1968 1969 filelog = repo.file(f)
1969 1970 if not filelog:
1970 1971 # A zero count may be a directory or deleted file, so
1971 1972 # try to find matching entries on the slow path.
1972 1973 if follow:
1973 1974 raise error.Abort(
1974 1975 _('cannot follow nonexistent file: "%s"') % f)
1975 1976 slowpath = True
1976 1977
1977 1978 # We decided to fall back to the slowpath because at least one
1978 1979 # of the paths was not a file. Check to see if at least one of them
1979 1980 # existed in history - in that case, we'll continue down the
1980 1981 # slowpath; otherwise, we can turn off the slowpath
1981 1982 if slowpath:
1982 1983 for path in match.files():
1983 1984 if path == '.' or path in repo.store:
1984 1985 break
1985 1986 else:
1986 1987 slowpath = False
1987 1988
1988 1989 fpats = ('_patsfollow', '_patsfollowfirst')
1989 1990 fnopats = (('_ancestors', '_fancestors'),
1990 1991 ('_descendants', '_fdescendants'))
1991 1992 if slowpath:
1992 1993 # See walkchangerevs() slow path.
1993 1994 #
1994 1995 # pats/include/exclude cannot be represented as separate
1995 1996 # revset expressions as their filtering logic applies at file
1996 1997 # level. For instance "-I a -X a" matches a revision touching
1997 1998 # "a" and "b" while "file(a) and not file(b)" does
1998 1999 # not. Besides, filesets are evaluated against the working
1999 2000 # directory.
2000 2001 matchargs = ['r:', 'd:relpath']
2001 2002 for p in pats:
2002 2003 matchargs.append('p:' + p)
2003 2004 for p in opts.get('include', []):
2004 2005 matchargs.append('i:' + p)
2005 2006 for p in opts.get('exclude', []):
2006 2007 matchargs.append('x:' + p)
2007 2008 matchargs = ','.join(('%r' % p) for p in matchargs)
2008 2009 opts['_matchfiles'] = matchargs
2009 2010 if follow:
2010 2011 opts[fnopats[0][followfirst]] = '.'
2011 2012 else:
2012 2013 if follow:
2013 2014 if pats:
2014 2015 # follow() revset interprets its file argument as a
2015 2016 # manifest entry, so use match.files(), not pats.
2016 2017 opts[fpats[followfirst]] = list(match.files())
2017 2018 else:
2018 2019 op = fnopats[followdescendants][followfirst]
2019 2020 opts[op] = 'rev(%d)' % startrev
2020 2021 else:
2021 2022 opts['_patslog'] = list(pats)
2022 2023
2023 2024 filematcher = None
2024 2025 if opts.get('patch') or opts.get('stat'):
2025 2026 # When following files, track renames via a special matcher.
2026 2027 # If we're forced to take the slowpath it means we're following
2027 2028 # at least one pattern/directory, so don't bother with rename tracking.
2028 2029 if follow and not match.always() and not slowpath:
2029 2030 # _makefollowlogfilematcher expects its files argument to be
2030 2031 # relative to the repo root, so use match.files(), not pats.
2031 2032 filematcher = _makefollowlogfilematcher(repo, match.files(),
2032 2033 followfirst)
2033 2034 else:
2034 2035 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2035 2036 if filematcher is None:
2036 2037 filematcher = lambda rev: match
2037 2038
2038 2039 expr = []
2039 2040 for op, val in sorted(opts.iteritems()):
2040 2041 if not val:
2041 2042 continue
2042 2043 if op not in opt2revset:
2043 2044 continue
2044 2045 revop, andor = opt2revset[op]
2045 2046 if '%(val)' not in revop:
2046 2047 expr.append(revop)
2047 2048 else:
2048 2049 if not isinstance(val, list):
2049 2050 e = revop % {'val': val}
2050 2051 else:
2051 2052 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2052 2053 expr.append(e)
2053 2054
2054 2055 if expr:
2055 2056 expr = '(' + ' and '.join(expr) + ')'
2056 2057 else:
2057 2058 expr = None
2058 2059 return expr, filematcher
2059 2060
def _logrevs(repo, opts):
    """Return the base set of revisions 'log' should visit, newest first.

    Default --rev value depends on --follow but --follow behavior
    depends on revisions resolved from --rev...
    """
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        # explicit --rev wins over everything else
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # nothing checked out: nothing to follow
            return revset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = revset.spanset(repo)
    allrevs.reverse()
    return allrevs
2074 2075
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        if not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # keep only the first 'limit' revisions
        head = []
        for count, rev in enumerate(revs):
            if count == limit:
                break
            head.append(rev)
        revs = revset.baseset(head)

    return revs, expr, filematcher
2105 2106
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # keep only the first 'limit' revisions
        taken = []
        for seen, rev in enumerate(revs):
            if seen == limit:
                break
            taken.append(rev)
        revs = revset.baseset(taken)

    return revs, expr, filematcher
2131 2132
def _graphnodeformatter(ui, displayer):
    """Return a callable (repo, ctx) -> str rendering the graph node char.

    Without a ui.graphnodetemplate config value, the stock {graphnode}
    template keyword is returned directly.
    """
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        # fast path for "{graphnode}"
        return templatekw.showgraphnode

    templ = formatter.gettemplater(ui, 'graphnode', spec)
    if isinstance(displayer, changeset_templater):
        # reuse cache of slow templates
        cache = displayer.cache
    else:
        cache = {}
    props = templatekw.keywords.copy()
    props.update({'templ': templ, 'cache': cache})

    def formatnode(repo, ctx):
        props.update({'ctx': ctx, 'repo': repo, 'ui': repo.ui,
                      'revcache': {}})
        return templater.stringify(templ('graphnode', **props))
    return formatnode
2151 2152
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Render the changesets of *dag* as an ASCII graph via *displayer*.

    dag yields (rev, type, ctx, parents) tuples; edgefn turns each node's
    rendered lines into graph edge drawing instructions consumed by
    graphmod.ascii(). getrenamed, if given, is used to compute copy
    information per changed file; filematcher, if given, maps a rev to a
    match object restricting --patch/--stat output.
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # ctx.rev() is falsy for the nullrev/workingctx cases; skip those
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # displayer buffers its output per-rev in displayer.hunk
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        # drop the trailing empty line produced by the final '\n'
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2199 2200
def graphlog(ui, repo, *pats, **opts):
    """Run log with the DAG rendered as an ASCII graph.

    Parameters are identical to log command ones.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        else:
            endrev = None
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2214 2215
def checkunsupportedgraphflags(pats, opts):
    """Abort when an option that -G/--graph does not support is set."""
    unsupported = ("newest_first",)
    for name in unsupported:
        if opts.get(name):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % name.replace("_", "-"))
2220 2221
def graphrevs(repo, nodes, opts):
    """Reverse *nodes* in place, apply --limit, and build graph DAG nodes."""
    maxcount = loglimit(opts)
    nodes.reverse()
    shown = nodes if maxcount is None else nodes[:maxcount]
    return graphmod.nodes(repo, shown)
2227 2228
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule matched unknown files for addition, recursing into subrepos.

    prefix is the path of this (sub)repo relative to the top-level repo,
    used for messages and for wctx.add(). With explicitonly, only files
    named exactly on the command line are added. Returns the list of
    files that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # collect files the matcher flags as bad while still reporting them
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audits added names for Windows/case-folding portability problems
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            # without --subrepos only exactly-matched files are added inside
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        # only report rejections the user explicitly asked about
        bad.extend(f for f in rejected if f in match.files())
    return bad
2270 2271
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking matched files without deleting them, recursing into
    subrepos.

    Returns a (bad, forgot) pair: files that could not be forgotten and
    files that were actually forgotten (both relative to this repo,
    subrepo entries prefixed with their subrepo path).
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # collect files the matcher flags as bad while still reporting them
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # modified + added + deleted + clean, i.e. everything currently tracked
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly-named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2318 2319
def files(ui, ctx, m, fm, fmt, subrepos):
    """Write the files of *ctx* matched by *m* through formatter *fm*.

    fmt is the format string for the path field. With subrepos, matched
    subrepositories are listed recursively. Returns 0 if anything was
    printed, 1 otherwise (command exit-code convention).
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # in the working context (rev is None), skip removed files
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                # only recurse further when asked to, or exactly matched
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2348 2349
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Schedule matched files for removal, recursing into subrepos.

    after: record the removal of already-missing files only; force: remove
    even modified/added files (and unlink them). warnings accumulates the
    warning strings; when the caller passes a list, the warnings are NOT
    printed here (the outermost call prints them all at the end). Returns
    1 if any warning was issued, 0 otherwise.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        # a parent call owns the warnings list and will print them
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only record files already gone from the filesystem
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2466 2467
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write the contents of matched files at *ctx*, recursing into subrepos.

    Output goes through makefileobj (honoring --output); with --decode the
    data is passed through the repo's write filters. Returns 0 if at least
    one file was written, 1 otherwise.
    """
    err = 1

    def write(path):
        # resolve the destination (stdout or --output pattern) per file
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            # find() raises KeyError when the file is not in the manifest,
            # in which case we fall through to the generic walk below
            if mfnode and mfl[mfnode].find(file)[0]:
                write(file)
                return 0
        except KeyError:
            pass

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2509 2510
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove') and scmutil.addremove(repo, matcher, "", opts):
        raise error.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2526 2527
def samefile(f, ctx1, ctx2):
    """True if file *f* is identical (content and flags) in both contexts.

    A file absent from both contexts also counts as "same".
    """
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if not in1:
        # same only when the file is missing on both sides
        return not in2
    if not in2:
        return False
    fctx1 = ctx1.filectx(f)
    fctx2 = ctx2.filectx(f)
    return not fctx1.cmp(fctx2) and fctx1.flags() == fctx2.flags()
2538 2539
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Combine the working directory changes with changeset *old* into a
    single replacement changeset.

    Creates a temporary intermediate commit for the working-directory
    changes (hooks and active bookmark suspended), builds a memctx merging
    it with *old* on top of old.p1(), and either obsoletes or strips the
    replaced changesets. Returns the node of the amended changeset (or
    old's own node when nothing changed).
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            # |            from working dir to go into amending commit
            # |            (or a workingctx if there were no changes)
            # |
            # old      o - changeset to amend
            # |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                # NOTE(review): these assignments are unconditionally
                # superseded below by opts.get('user')/old.user() etc.
                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # Only merge in copies relative to the second parent when
                # there actually is one. The previous code tested the bound
                # method "old.p2" itself, which is always truthy, so this
                # branch used to run even for non-merge changesets.
                if old.p2().node() != nullid:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())
                files = [f for f in files if not samefile(f, ctx, base)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            #commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid
2721 2722
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, invoking the editor when it is empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2727 2728
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Invoke the user's editor to obtain a commit message for *ctx*.

    The initial text comes from the most specific matching
    committemplate.* config entry, falling back to the built-in text.
    finishdesc, if given, post-processes the edited text. Raises Abort on
    an empty message, or (with unchangedmessagedetection) when the user
    left the templated text untouched.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root (pycompat.getcwd for py3 bytes paths)
    olddir = pycompat.getcwd()
    os.chdir(repo.root)
    # restore the cwd even if the editor (or pending-write) raises
    try:
        # make in-memory changes visible to external process
        tr = repo.currenttransaction()
        repo.dirstate.write(tr)
        pending = tr and tr.writepending() and repo.root

        editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                                  editform=editform, pending=pending)
        text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
    finally:
        os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2768 2769
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit editor text for *ctx* through template *tmpl*."""
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)
    t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)

    # expose every other committemplate.* entry to the template engine
    for key, value in ui.configitems('committemplate'):
        if key != 'changeset':
            t.t.cache[key] = value

    # ensure that extramsg is a string
    extramsg = extramsg or ''

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2785 2786
def hgprefix(msg):
    """Return *msg* with every non-empty line prefixed by "HG: "."""
    prefixed = []
    for line in msg.split("\n"):
        if line:
            prefixed.append("HG: %s" % line)
    return "\n".join(prefixed)
2788 2789
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) text shown in the commit editor."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(hgprefix(_("Enter commit message."
                            " Lines beginning with 'HG:' are removed.")))
    lines.append(hgprefix(extramsg))
    lines.append("HG: --")
    lines.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_("branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    for s in subs:
        lines.append(hgprefix(_("subrepo %s") % s))
    for f in added:
        lines.append(hgprefix(_("added %s") % f))
    for f in modified:
        lines.append(hgprefix(_("changed %s") % f))
    for f in removed:
        lines.append(hgprefix(_("removed %s") % f))
    if not (added or modified or removed):
        lines.append(hgprefix(_("no files changed")))
    lines.append("")

    return "\n".join(lines)
2816 2817
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print post-commit status messages for the new changeset *node*.

    bheads is the list of branch head nodes of *branch* before the commit;
    it is used to decide whether to print "created new head".
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    # new head: not amending, heads existed before, the new node is not one
    # of them, and no parent was a head of this branch
    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N y additional topo root
        #
        # B N y additional branch root
        # C N y additional topo head
        # H N n usual case
        #
        # B B y weird additional branch root
        # C B y branch merge
        # H B n merge with named branch
        #
        # C C y additional head from merge
        # C H n merge with a head
        #
        # H H n head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2864 2865
def postcommitstatus(repo, pats, opts):
    """Return the working directory status, restricted to ``pats``."""
    wctx = repo[None]
    matcher = scmutil.match(wctx, pats, opts)
    return repo.status(match=matcher)
2867 2868
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Revert files matched by ``pats`` to their state in ``ctx``.

    ``parents`` is the (p1, p2) pair of the working directory. Every
    matched file is classified by comparing the target revision, the
    working copy and the dirstate; each file is then dispatched to one
    action (revert/add/remove/drop/forget/undelete/noop/unknown) via
    ``disptable`` and the actions are executed by _performrevert().
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress warnings for paths already collected, for
                # subrepos, and for directories that contain matches
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted changesets
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = dsmodified - smf
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                   }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2 # unconditionally do backup
        check = 1 # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], dsmodifiedbackup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, basestring):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
3146 3147
3147 3148 def _revertprefetch(repo, ctx, *files):
3148 3149 """Let extension changing the storage layer prefetch content"""
3149 3150 pass
3150 3151
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = parents
    node = ctx.node()
    # files excluded from the interactive matcher (user declined to forget)
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write the target revision's content of f into the working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        if interactive:
            # in interactive mode, confirm each forget individually
            choice = \
                repo.ui.promptchoice(
                    _("forget added file %s (yn)?$$ &Yes $$ &No")
                    % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            # file may already be gone; removal from dirstate still applies
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        reversehunks = repo.ui.configbool('experimental',
                                          'revertalternateinteractivemode',
                                          True)
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)
        operation = 'discard' if node == parent else 'revert'

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # restore copy records for reverted files that were copies in ctx
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3277 3278
def command(table):
    """Return a decorator factory that registers commands into ``table``.

    ``table`` must be a dict. The returned ``cmd`` callable is used as a
    decorator::

        @cmd(name, options, synopsis, norepo=..., optionalrepo=...,
             inferrepo=...)
        def thing(...): ...

    where:

    - ``name`` is the command name;
    - ``options`` is an iterable of argument tuples in the format accepted
      by ``mercurial.fancyopts.fancyopts()``;
    - ``synopsis`` is a short, one-line summary of how to use the command,
      shown in the help output;
    - ``norepo`` declares that the command does not require a local
      repository (most commands do, so the default is False);
    - ``optionalrepo`` declares that a local repository is optional;
    - ``inferrepo`` asks for the repository to be inferred from the command
      line arguments (see ``findrepo()``); if one is found, it is used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            # record repository requirements on the function itself
            func.norepo = norepo
            func.optionalrepo = optionalrepo
            func.inferrepo = inferrepo
            entry = (func, list(options))
            if synopsis:
                entry += (synopsis,)
            table[name] = entry
            return func
        return decorator
    return cmd
3322 3323
def checkunresolved(ms):
    # Deprecated forwarding shim kept for out-of-tree callers; the real
    # implementation now lives in mergeutil.checkunresolved().
    ms._repo.ui.deprecwarn('checkunresolved moved from cmdutil to mergeutil',
                           '4.1')
    return mergeutil.checkunresolved(ms)
3327 3328
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Each entry is a tuple:
#   (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3356 3357
def checkunfinished(repo, commit=False):
    '''Abort if an unfinished multistep operation (like graft) is found.

    With commit=True, states that explicitly allow committing are skipped.
    It's probably good to check this right before bailifchanged().
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not (commit and allowcommit) and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3367 3368
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the clearable
    ones.
    '''
    # first pass: refuse to clear anything while an unclearable state exists
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # second pass: drop the state files of all clearable operations
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.join(statefile))
3378 3379
# tuples of (.hg/{state file}, command that finishes the operation)
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3383 3384
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple: msg is a string (or None when there is
    nothing to continue) and warning is a boolean.
    '''
    fmt = _("continue: %s")
    for statefile, nextcmd in afterresolvedstates:
        if repo.vfs.exists(statefile):
            return fmt % nextcmd, True
    wctx = repo[None]
    if any(repo.status()) or any(wctx.sub(s).dirty() for s in wctx.substate):
        return fmt % _("hg commit"), False
    return None, None
3404 3405
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a matching afterresolvedstates, the message goes through
    repo.ui.warn; otherwise through repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    report = repo.ui.warn if warning else repo.ui.note
    report("%s\n" % msg)
3419 3420
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task; when none warrants a
    warning (e.g. only 'hg commit' would apply), no hint is offered.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
3434 3435
class dirstateguard(dirstateguardmod.dirstateguard):
    # Deprecated forwarding subclass kept for backwards compatibility;
    # the implementation moved to the dirstateguard module.
    def __init__(self, repo, name):
        dirstateguardmod.dirstateguard.__init__(self, repo, name)
        repo.ui.deprecwarn(
            'dirstateguard has moved from cmdutil to dirstateguard',
            '4.1')
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now