##// END OF EJS Templates
vfs: replace 'scmutil.opener' usage with 'scmutil.vfs'...
Pierre-Yves David -
r31216:21fa3d36 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,46 +1,46 b''
#!/usr/bin/env python
# Undump a dump from dumprevlog
# $ hg init
# $ undumprevlog < repo.dump

from __future__ import absolute_import

import sys
from mercurial import (
    node,
    revlog,
    scmutil,
    transaction,
    util,
)

# Revlog payloads are binary: force binary mode on the standard streams so
# platforms that translate line endings (Windows) do not corrupt the data.
for fp in (sys.stdin, sys.stdout, sys.stderr):
    util.setbinary(fp)

opener = scmutil.vfs('.', False)
tr = transaction.transaction(sys.stderr.write, opener, {'store': opener},
                             "undump.journal")
# The dump produced by dumprevlog is a sequence of "key: value" header
# lines; a "length:" header is followed by a start-marker line, the raw
# revision data, and an end-marker line.
while True:
    l = sys.stdin.readline()
    if not l:
        break
    if l.startswith("file:"):
        f = l[6:-1]
        r = revlog.revlog(opener, f)
        print f
    elif l.startswith("node:"):
        n = node.bin(l[6:-1])
    elif l.startswith("linkrev:"):
        lr = int(l[9:-1])
    elif l.startswith("parents:"):
        p = l[9:-1].split()
        p1 = node.bin(p[0])
        p2 = node.bin(p[1])
    elif l.startswith("length:"):
        length = int(l[8:-1])
        sys.stdin.readline() # start marker
        d = sys.stdin.read(length)
        sys.stdin.readline() # end marker
        # NOTE(review): assumes node/linkrev/parents headers always precede
        # the length header for a revision — confirm against dumprevlog.
        r.addrevision(d, tr, lr, p1, p2)

tr.close()
@@ -1,1353 +1,1353 b''
1 1 # Subversion 1.4/1.5 Python API backend
2 2 #
3 3 # Copyright(C) 2007 Daniel Holth et al
4 4 from __future__ import absolute_import
5 5
6 6 import os
7 7 import re
8 8 import tempfile
9 9 import xml.dom.minidom
10 10
11 11 from mercurial.i18n import _
12 12 from mercurial import (
13 13 encoding,
14 14 error,
15 15 pycompat,
16 16 scmutil,
17 17 util,
18 18 )
19 19
20 20 from . import common
21 21
# Short local aliases for helpers re-exported by mercurial.util and
# hgext.convert.common, so the rest of this module stays terse.
pickle = util.pickle
stringio = util.stringio
propertycache = util.propertycache
urlerr = util.urlerr
urlreq = util.urlreq

commandline = common.commandline
commit = common.commit
converter_sink = common.converter_sink
converter_source = common.converter_source
decodeargs = common.decodeargs
encodeargs = common.encodeargs
makedatetimestamp = common.makedatetimestamp
mapfile = common.mapfile
MissingTool = common.MissingTool
NoRepo = common.NoRepo
38 38
39 39 # Subversion stuff. Works best with very recent Python SVN bindings
40 40 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
41 41 # these bindings.
42 42
43 43 try:
44 44 import svn
45 45 import svn.client
46 46 import svn.core
47 47 import svn.ra
48 48 import svn.delta
49 49 from . import transport
50 50 import warnings
51 51 warnings.filterwarnings('ignore',
52 52 module='svn.core',
53 53 category=DeprecationWarning)
54 54 svn.core.SubversionException # trigger import to catch error
55 55
56 56 except ImportError:
57 57 svn = None
58 58
class SvnPathNotFound(Exception):
    """Raised when a path cannot be found in the repository history up to
    a given revision (see svn_source.latest)."""
    pass
61 61
def revsplit(rev):
    """Parse a revision string and return (uuid, path, revnum).
    >>> revsplit('svn:a2147622-4a9f-4db4-a8d3-13562ff547b2'
    ...          '/proj%20B/mytrunk/mytrunk@1')
    ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1)
    >>> revsplit('svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1')
    ('', '', 1)
    >>> revsplit('@7')
    ('', '', 7)
    >>> revsplit('7')
    ('', '', 0)
    >>> revsplit('bad')
    ('', '', 0)
    """
    # Split off the trailing "@revnum" if present.
    head, at, tail = rev.rpartition('@')
    if at:
        revnum = int(tail)
        base = head
    else:
        revnum = 0
        base = rev
    # A well-formed prefix looks like "svn:<uuid>/<module>".
    prefix, slash, rest = base.partition('/')
    if slash and prefix.startswith('svn:'):
        return prefix[4:], '/' + rest, revnum
    return '', '', revnum
87 87
def quote(s):
    """Percent-encode *s* the way Subversion encodes URL components."""
    # As of svn 1.7, many svn calls expect "canonical" paths. In
    # theory, we should call svn.core.*canonicalize() on all paths
    # before passing them to the API. Instead, we assume the base url
    # is canonical and copy the behaviour of svn URL encoding function
    # so we can extend it safely with new components. The "safe"
    # characters were taken from the "svn_uri__char_validity" table in
    # libsvn_subr/path.c.
    return urlreq.quote(s, "!$&'()*+,-./:=@_~")
97 97
def geturl(path):
    """Return a canonical Subversion URL for *path* (URL or local path)."""
    try:
        return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
    except svn.core.SubversionException:
        # svn.client.url_from_path() fails with local repositories
        pass
    if os.path.isdir(path):
        # Turn a local directory into a file:// URL.
        path = os.path.normpath(os.path.abspath(path))
        if pycompat.osname == 'nt':
            path = '/' + util.normpath(path)
        # Module URL is later compared with the repository URL returned
        # by svn API, which is UTF-8.
        path = encoding.tolocal(path)
        path = 'file://%s' % quote(path)
    return svn.core.svn_path_canonicalize(path)
113 113
def optrev(number):
    """Wrap *number* in an svn_opt_revision_t of kind 'number'."""
    rev = svn.core.svn_opt_revision_t()
    rev.kind = svn.core.svn_opt_revision_number
    rev.value.number = number
    return rev
119 119
class changedpath(object):
    """Plain snapshot of the interesting fields of an svn changed-path
    entry, safe to pickle across the debugsvnlog child-process pipe."""

    _fields = ('copyfrom_path', 'copyfrom_rev', 'action')

    def __init__(self, p):
        for name in self._fields:
            setattr(self, name, getattr(p, name))
125 125
def get_log_child(fp, url, paths, start, end, limit=0,
                  discover_changed_paths=True, strict_node_history=False):
    """Run svn.ra.get_log and stream each log entry to *fp* as a pickle.

    Runs in a child process (see debugsvnlog); the parent reads the
    pickles back through logstream.  A trailing None pickle marks the end
    of the stream; a str pickle carries an error message.
    """
    protocol = -1  # newest available pickle protocol
    def receiver(orig_paths, revnum, author, date, message, pool):
        # Normalize the svn changed-paths mapping (possibly None) into a
        # plain dict of picklable changedpath objects.
        paths = {}
        if orig_paths is not None:
            for k, v in orig_paths.iteritems():
                paths[k] = changedpath(v)
        pickle.dump((paths, revnum, author, date, message),
                    fp, protocol)

    try:
        # Use an ra of our own so that our parent can consume
        # our results without confusing the server.
        t = transport.SvnRaTransport(url=url)
        svn.ra.get_log(t.ra, paths, start, end, limit,
                       discover_changed_paths,
                       strict_node_history,
                       receiver)
    except IOError:
        # Caller may interrupt the iteration
        pickle.dump(None, fp, protocol)
    except Exception as inst:
        pickle.dump(str(inst), fp, protocol)
    else:
        pickle.dump(None, fp, protocol)
    fp.close()
    # With large history, cleanup process goes crazy and suddenly
    # consumes *huge* amount of memory. The output file being closed,
    # there is no need for clean termination.
    os._exit(0)
157 157
def debugsvnlog(ui, **opts):
    """Fetch SVN log in a subprocess and channel them back to parent to
    avoid memory collection issues.
    """
    if svn is None:
        raise error.Abort(_('debugsvnlog could not load Subversion python '
                            'bindings'))

    # The parent encodes the get_log_child arguments on our stdin.
    args = decodeargs(ui.fin.read())
    get_log_child(ui.fout, *args)
168 168
class logstream(object):
    """Interruptible revision log iterator."""
    def __init__(self, stdout):
        # Pipe carrying the pickles written by get_log_child.
        self._stdout = stdout

    def __iter__(self):
        while True:
            try:
                entry = pickle.load(self._stdout)
            except EOFError:
                raise error.Abort(_('Mercurial failed to run itself, check'
                                    ' hg executable is in PATH'))
            try:
                orig_paths, revnum, author, date, message = entry
            except (TypeError, ValueError):
                # None is the end-of-stream sentinel; any other
                # non-tuple pickle is an error message from the child.
                if entry is None:
                    break
                raise error.Abort(_("log stream exception '%s'") % entry)
            yield entry

    def close(self):
        if self._stdout:
            self._stdout.close()
            self._stdout = None
193 193
class directlogstream(list):
    """Direct revision log iterator.
    This can be used for debugging and development but it will probably leak
    memory and is not suitable for real conversions."""
    def __init__(self, url, paths, start, end, limit=0,
                 discover_changed_paths=True, strict_node_history=False):

        def receiver(orig_paths, revnum, author, date, message, pool):
            # Same normalization as get_log_child.receiver, but entries
            # accumulate in this list instead of a pipe.
            paths = {}
            if orig_paths is not None:
                for k, v in orig_paths.iteritems():
                    paths[k] = changedpath(v)
            self.append((paths, revnum, author, date, message))

        # Use an ra of our own so that our parent can consume
        # our results without confusing the server.
        t = transport.SvnRaTransport(url=url)
        svn.ra.get_log(t.ra, paths, start, end, limit,
                       discover_changed_paths,
                       strict_node_history,
                       receiver)

    def close(self):
        # Nothing to release: entries live in the list itself.
        pass
218 218
# Probe whether the given path is the root of a local Subversion repo by
# checking for the svn-specific entries every repository root contains.
def filecheck(ui, path, proto):
    return all(os.path.exists(os.path.join(path, x))
               for x in ('locks', 'hooks', 'format', 'db'))
227 227
228 228 # Check to see if a given path is the root of an svn repo over http. We verify
229 229 # this by requesting a version-controlled URL we know can't exist and looking
230 230 # for the svn-specific "not found" XML.
def httpcheck(ui, path, proto):
    """Probe whether proto://path is the root of an svn repo over HTTP by
    requesting a URL that cannot exist and inspecting the error body."""
    try:
        opener = urlreq.buildopener()
        rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
        data = rsp.read()
    except urlerr.httperror as inst:
        if inst.code != 404:
            # Except for 404 we cannot know for sure this is not an svn repo
            ui.warn(_('svn: cannot probe remote repository, assume it could '
                      'be a subversion repository. Use --source-type if you '
                      'know better.\n'))
            return True
        data = inst.fp.read()
    except Exception:
        # Could be urlerr.urlerror if the URL is invalid or anything else.
        return False
    # svn's "not found" error page contains this marker element.
    return '<m:human-readable errcode="160013">' in data
248 248
# URL scheme -> probe function used by issvnurl() to decide whether a URL
# points at a Subversion repository.
protomap = {'http': httpcheck,
            'https': httpcheck,
            'file': filecheck,
            }
def issvnurl(ui, url):
    """Return True if *url* appears to point into a Subversion repository."""
    try:
        proto, path = url.split('://', 1)
        if proto == 'file':
            # Undo the "/C%3a/..." encoding of Windows drive letters.
            if (pycompat.osname == 'nt' and path[:1] == '/'
                  and path[1:2].isalpha() and path[2:6].lower() == '%3a/'):
                path = path[:2] + ':/' + path[6:]
            path = urlreq.url2pathname(path)
    except ValueError:
        # No "://" separator: treat the whole thing as a local path.
        proto = 'file'
        path = os.path.abspath(url)
    if proto == 'file':
        path = util.pconvert(path)
    check = protomap.get(proto, lambda *args: False)
    # The URL may point below the repository root: walk up one path
    # component at a time until a probe succeeds.
    while '/' in path:
        if check(ui, path, proto):
            return True
        path = path.rsplit('/', 1)[0]
    return False
272 272
273 273 # SVN conversion code stolen from bzr-svn and tailor
274 274 #
275 275 # Subversion looks like a versioned filesystem, branches structures
276 276 # are defined by conventions and not enforced by the tool. First,
277 277 # we define the potential branches (modules) as "trunk" and "branches"
278 278 # children directories. Revisions are then identified by their
279 279 # module and revision number (and a repository identifier).
280 280 #
281 281 # The revision graph is really a tree (or a forest). By default, a
282 282 # revision parent is the previous revision in the same module. If the
283 283 # module directory is copied/moved from another module then the
284 284 # revision is the module root and its parent the source revision in
285 285 # the parent module. A revision has at most one parent.
286 286 #
287 287 class svn_source(converter_source):
    def __init__(self, ui, url, revs=None):
        """Open *url* as a Subversion conversion source.

        Raises NoRepo when the URL does not look like an svn repository
        and MissingTool when usable svn python bindings are absent.
        """
        super(svn_source, self).__init__(ui, url, revs=revs)

        if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
                (os.path.exists(url) and
                 os.path.exists(os.path.join(url, '.svn'))) or
                issvnurl(ui, url)):
            raise NoRepo(_("%s does not look like a Subversion repository")
                         % url)
        if svn is None:
            raise MissingTool(_('could not load Subversion python bindings'))

        try:
            version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
            if version < (1, 4):
                raise MissingTool(_('Subversion python bindings %d.%d found, '
                                    '1.4 or later required') % version)
        except AttributeError:
            # Bindings so old they lack the version constants entirely.
            raise MissingTool(_('Subversion python bindings are too old, 1.4 '
                                'or later required'))

        self.lastrevs = {}

        latest = None
        try:
            # Support file://path@rev syntax. Useful e.g. to convert
            # deleted branches.
            at = url.rfind('@')
            if at >= 0:
                latest = int(url[at + 1:])
                url = url[:at]
        except ValueError:
            pass
        self.url = geturl(url)
        self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
        try:
            self.transport = transport.SvnRaTransport(url=self.url)
            self.ra = self.transport.ra
            self.ctx = self.transport.client
            self.baseurl = svn.ra.get_repos_root(self.ra)
            # Module is either empty or a repository path starting with
            # a slash and not ending with a slash.
            self.module = urlreq.unquote(self.url[len(self.baseurl):])
            self.prevmodule = None
            self.rootmodule = self.module
            self.commits = {}
            self.paths = {}
            self.uuid = svn.ra.get_uuid(self.ra)
        except svn.core.SubversionException:
            ui.traceback()
            svnversion = '%d.%d.%d' % (svn.core.SVN_VER_MAJOR,
                                       svn.core.SVN_VER_MINOR,
                                       svn.core.SVN_VER_MICRO)
            raise NoRepo(_("%s does not look like a Subversion repository "
                           "to libsvn version %s")
                         % (self.url, svnversion))

        if revs:
            if len(revs) > 1:
                raise error.Abort(_('subversion source does not support '
                                    'specifying multiple revisions'))
            try:
                latest = int(revs[0])
            except ValueError:
                raise error.Abort(_('svn: revision %s is not an integer') %
                                  revs[0])

        self.trunkname = self.ui.config('convert', 'svn.trunk',
                                        'trunk').strip('/')
        self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
        try:
            self.startrev = int(self.startrev)
            if self.startrev < 0:
                self.startrev = 0
        except ValueError:
            raise error.Abort(_('svn: start revision %s is not an integer')
                              % self.startrev)

        try:
            self.head = self.latest(self.module, latest)
        except SvnPathNotFound:
            self.head = None
        if not self.head:
            raise error.Abort(_('no revision found in module %s')
                              % self.module)
        self.last_changed = self.revnum(self.head)

        # (rev, (files, copies)) cache shared by getchangedfiles/getchanges.
        self._changescache = (None, None)

        # Remember a working copy location so converted() can record the
        # svn->hg revision mapping next to it.
        if os.path.exists(os.path.join(url, '.svn/entries')):
            self.wc = url
        else:
            self.wc = None
        self.convertfp = None
382 382
383 383 def setrevmap(self, revmap):
384 384 lastrevs = {}
385 385 for revid in revmap.iterkeys():
386 386 uuid, module, revnum = revsplit(revid)
387 387 lastrevnum = lastrevs.setdefault(module, revnum)
388 388 if revnum > lastrevnum:
389 389 lastrevs[module] = revnum
390 390 self.lastrevs = lastrevs
391 391
    def exists(self, path, optrev):
        """Return True if *path* exists in the repository at *optrev*."""
        try:
            svn.client.ls(self.url.rstrip('/') + '/' + quote(path),
                          optrev, False, self.ctx)
            return True
        except svn.core.SubversionException:
            return False
399 399
    def getheads(self):
        """Return the list of head revids to convert (module head first,
        then one head per branch directory)."""

        def isdir(path, revnum):
            kind = self._checkpath(path, revnum)
            return kind == svn.core.svn_node_dir

        def getcfgpath(name, rev):
            # Resolve the convert.svn.<name> layout directory, or None if
            # it is disabled or absent.
            cfgpath = self.ui.config('convert', 'svn.' + name)
            if cfgpath is not None and cfgpath.strip() == '':
                return None
            path = (cfgpath or name).strip('/')
            if not self.exists(path, rev):
                if self.module.endswith(path) and name == 'trunk':
                    # we are converting from inside this directory
                    return None
                if cfgpath:
                    raise error.Abort(_('expected %s to be at %r, but not found'
                                       ) % (name, path))
                return None
            self.ui.note(_('found %s at %r\n') % (name, path))
            return path

        rev = optrev(self.last_changed)
        oldmodule = ''
        trunk = getcfgpath('trunk', rev)
        self.tags = getcfgpath('tags', rev)
        branches = getcfgpath('branches', rev)

        # If the project has a trunk or branches, we will extract heads
        # from them. We keep the project root otherwise.
        if trunk:
            oldmodule = self.module or ''
            self.module += '/' + trunk
            self.head = self.latest(self.module, self.last_changed)
            if not self.head:
                raise error.Abort(_('no revision found in module %s')
                                  % self.module)

        # First head in the list is the module's head
        self.heads = [self.head]
        if self.tags is not None:
            self.tags = '%s/%s' % (oldmodule, (self.tags or 'tags'))

        # Check if branches bring a few more heads to the list
        if branches:
            rpath = self.url.strip('/')
            branchnames = svn.client.ls(rpath + '/' + quote(branches),
                                        rev, False, self.ctx)
            for branch in sorted(branchnames):
                module = '%s/%s/%s' % (oldmodule, branches, branch)
                if not isdir(module, self.last_changed):
                    continue
                brevid = self.latest(module, self.last_changed)
                if not brevid:
                    self.ui.note(_('ignoring empty branch %s\n') % branch)
                    continue
                self.ui.note(_('found branch %s at %d\n') %
                             (branch, self.revnum(brevid)))
                self.heads.append(brevid)

        if self.startrev and self.heads:
            if len(self.heads) > 1:
                raise error.Abort(_('svn: start revision is not supported '
                                    'with more than one branch'))
            revnum = self.revnum(self.heads[0])
            if revnum < self.startrev:
                raise error.Abort(
                    _('svn: no revision found after start revision %d')
                    % self.startrev)

        return self.heads
471 471
    def _getchanges(self, rev, full):
        """Compute ([(file, rev)], copies) for *rev*; also sets self.removed.

        With *full* (or on branch roots, which have no parents) the file
        list comes from a full directory listing instead of the changed
        paths recorded for the revision.
        """
        (paths, parents) = self.paths[rev]
        copies = {}
        if parents:
            files, self.removed, copies = self.expandpaths(rev, paths, parents)
        if full or not parents:
            # Perform a full checkout on roots
            uuid, module, revnum = revsplit(rev)
            entries = svn.client.ls(self.baseurl + quote(module),
                                    optrev(revnum), True, self.ctx)
            files = [n for n, e in entries.iteritems()
                     if e.kind == svn.core.svn_node_file]
            self.removed = set()

        files.sort()
        files = zip(files, [rev] * len(files))
        return (files, copies)
489 489
490 490 def getchanges(self, rev, full):
491 491 # reuse cache from getchangedfiles
492 492 if self._changescache[0] == rev and not full:
493 493 (files, copies) = self._changescache[1]
494 494 else:
495 495 (files, copies) = self._getchanges(rev, full)
496 496 # caller caches the result, so free it here to release memory
497 497 del self.paths[rev]
498 498 return (files, copies, set())
499 499
500 500 def getchangedfiles(self, rev, i):
501 501 # called from filemap - cache computed values for reuse in getchanges
502 502 (files, copies) = self._getchanges(rev, False)
503 503 self._changescache = (rev, (files, copies))
504 504 return [f[0] for f in files]
505 505
    def getcommit(self, rev):
        """Return (and forget) the commit object for revid *rev*, fetching
        a batch of revisions from svn on a cache miss."""
        if rev not in self.commits:
            uuid, module, revnum = revsplit(rev)
            self.module = module
            self.reparent(module)
            # We assume that:
            # - requests for revisions after "stop" come from the
            #   revision graph backward traversal. Cache all of them
            #   down to stop, they will be used eventually.
            # - requests for revisions before "stop" come to get
            #   isolated branches parents. Just fetch what is needed.
            stop = self.lastrevs.get(module, 0)
            if revnum < stop:
                stop = revnum + 1
            self._fetch_revisions(revnum, stop)
            if rev not in self.commits:
                raise error.Abort(_('svn: revision %s not found') % revnum)
        revcommit = self.commits[rev]
        # caller caches the result, so free it here to release memory
        del self.commits[rev]
        return revcommit
527 527
528 528 def checkrevformat(self, revstr, mapname='splicemap'):
529 529 """ fails if revision format does not match the correct format"""
530 530 if not re.match(r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
531 531 r'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
532 532 r'{12,12}(.*)\@[0-9]+$',revstr):
533 533 raise error.Abort(_('%s entry %s is not a valid revision'
534 534 ' identifier') % (mapname, revstr))
535 535
536 536 def numcommits(self):
537 537 return int(self.head.rsplit('@', 1)[1]) - self.startrev
538 538
539 539 def gettags(self):
540 540 tags = {}
541 541 if self.tags is None:
542 542 return tags
543 543
544 544 # svn tags are just a convention, project branches left in a
545 545 # 'tags' directory. There is no other relationship than
546 546 # ancestry, which is expensive to discover and makes them hard
547 547 # to update incrementally. Worse, past revisions may be
548 548 # referenced by tags far away in the future, requiring a deep
549 549 # history traversal on every calculation. Current code
550 550 # performs a single backward traversal, tracking moves within
551 551 # the tags directory (tag renaming) and recording a new tag
552 552 # everytime a project is copied from outside the tags
553 553 # directory. It also lists deleted tags, this behaviour may
554 554 # change in the future.
555 555 pendings = []
556 556 tagspath = self.tags
557 557 start = svn.ra.get_latest_revnum(self.ra)
558 558 stream = self._getlog([self.tags], start, self.startrev)
559 559 try:
560 560 for entry in stream:
561 561 origpaths, revnum, author, date, message = entry
562 562 if not origpaths:
563 563 origpaths = []
564 564 copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
565 565 in origpaths.iteritems() if e.copyfrom_path]
566 566 # Apply moves/copies from more specific to general
567 567 copies.sort(reverse=True)
568 568
569 569 srctagspath = tagspath
570 570 if copies and copies[-1][2] == tagspath:
571 571 # Track tags directory moves
572 572 srctagspath = copies.pop()[0]
573 573
574 574 for source, sourcerev, dest in copies:
575 575 if not dest.startswith(tagspath + '/'):
576 576 continue
577 577 for tag in pendings:
578 578 if tag[0].startswith(dest):
579 579 tagpath = source + tag[0][len(dest):]
580 580 tag[:2] = [tagpath, sourcerev]
581 581 break
582 582 else:
583 583 pendings.append([source, sourcerev, dest])
584 584
585 585 # Filter out tags with children coming from different
586 586 # parts of the repository like:
587 587 # /tags/tag.1 (from /trunk:10)
588 588 # /tags/tag.1/foo (from /branches/foo:12)
589 589 # Here/tags/tag.1 discarded as well as its children.
590 590 # It happens with tools like cvs2svn. Such tags cannot
591 591 # be represented in mercurial.
592 592 addeds = dict((p, e.copyfrom_path) for p, e
593 593 in origpaths.iteritems()
594 594 if e.action == 'A' and e.copyfrom_path)
595 595 badroots = set()
596 596 for destroot in addeds:
597 597 for source, sourcerev, dest in pendings:
598 598 if (not dest.startswith(destroot + '/')
599 599 or source.startswith(addeds[destroot] + '/')):
600 600 continue
601 601 badroots.add(destroot)
602 602 break
603 603
604 604 for badroot in badroots:
605 605 pendings = [p for p in pendings if p[2] != badroot
606 606 and not p[2].startswith(badroot + '/')]
607 607
608 608 # Tell tag renamings from tag creations
609 609 renamings = []
610 610 for source, sourcerev, dest in pendings:
611 611 tagname = dest.split('/')[-1]
612 612 if source.startswith(srctagspath):
613 613 renamings.append([source, sourcerev, tagname])
614 614 continue
615 615 if tagname in tags:
616 616 # Keep the latest tag value
617 617 continue
618 618 # From revision may be fake, get one with changes
619 619 try:
620 620 tagid = self.latest(source, sourcerev)
621 621 if tagid and tagname not in tags:
622 622 tags[tagname] = tagid
623 623 except SvnPathNotFound:
624 624 # It happens when we are following directories
625 625 # we assumed were copied with their parents
626 626 # but were really created in the tag
627 627 # directory.
628 628 pass
629 629 pendings = renamings
630 630 tagspath = srctagspath
631 631 finally:
632 632 stream.close()
633 633 return tags
634 634
    def converted(self, rev, destrev):
        """Record that svn revision *rev* was converted as hg rev *destrev*
        in the working copy's .svn/hg-shamap file (no-op without a wc)."""
        if not self.wc:
            return
        if self.convertfp is None:
            self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
                                  'a')
        self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
        self.convertfp.flush()
643 643
644 644 def revid(self, revnum, module=None):
645 645 return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
646 646
647 647 def revnum(self, rev):
648 648 return int(rev.split('@')[-1])
649 649
    def latest(self, path, stop=None):
        """Find the latest revid affecting path, up to stop revision
        number. If stop is None, default to repository latest
        revision. It may return a revision in a different module,
        since a branch may be moved without a change being
        reported. Return None if computed module does not belong to
        rootmodule subtree.
        """
        def findchanges(path, start, stop=None):
            # Walk the log backwards from *start*, rewriting *path*
            # through copy records until *stop* (or the first changed
            # revision when stop is None) is reached.
            stream = self._getlog([path], start, stop or 1)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if stop is None and paths:
                        # We do not know the latest changed revision,
                        # keep the first one with changed paths.
                        break
                    if revnum <= stop:
                        break

                    for p in paths:
                        if (not path.startswith(p) or
                            not paths[p].copyfrom_path):
                            continue
                        newpath = paths[p].copyfrom_path + path[len(p):]
                        self.ui.debug("branch renamed from %s to %s at %d\n" %
                                      (path, newpath, revnum))
                        path = newpath
                        break
                if not paths:
                    revnum = None
                return revnum, path
            finally:
                stream.close()

        if not path.startswith(self.rootmodule):
            # Requests on foreign branches may be forbidden at server level
            self.ui.debug('ignoring foreign branch %r\n' % path)
            return None

        if stop is None:
            stop = svn.ra.get_latest_revnum(self.ra)
        try:
            prevmodule = self.reparent('')
            dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
            self.reparent(prevmodule)
        except svn.core.SubversionException:
            dirent = None
        if not dirent:
            raise SvnPathNotFound(_('%s not found up to revision %d')
                                  % (path, stop))

        # stat() gives us the previous revision on this line of
        # development, but it might be in *another module*. Fetch the
        # log and detect renames down to the latest revision.
        revnum, realpath = findchanges(path, stop, dirent.created_rev)
        if revnum is None:
            # Tools like svnsync can create empty revision, when
            # synchronizing only a subtree for instance. These empty
            # revisions created_rev still have their original values
            # despite all changes having disappeared and can be
            # returned by ra.stat(), at least when stating the root
            # module. In that case, do not trust created_rev and scan
            # the whole history.
            revnum, realpath = findchanges(path, stop)
            if revnum is None:
                self.ui.debug('ignoring empty branch %r\n' % realpath)
                return None

        if not realpath.startswith(self.rootmodule):
            self.ui.debug('ignoring foreign branch %r\n' % realpath)
            return None
        return self.revid(revnum, realpath)
723 723
    def reparent(self, module):
        """Reparent the svn transport and return the previous parent."""
        if self.prevmodule == module:
            # Already parented there; avoid a round-trip to the server.
            return module
        svnurl = self.baseurl + quote(module)
        prevmodule = self.prevmodule
        if prevmodule is None:
            prevmodule = ''
        self.ui.debug("reparent to %s\n" % svnurl)
        svn.ra.reparent(self.ra, svnurl)
        self.prevmodule = module
        return prevmodule
736 736
    def expandpaths(self, rev, paths, parents):
        """Expand the changed *paths* of *rev* into concrete file sets.

        Returns (changed, removed, copies): a list of changed files
        (including the removed ones), the set of removed files, and a
        {dest: source} copy mapping.  Directory events are expanded into
        their child files.
        """
        changed, removed = set(), set()
        copies = {}

        new_module, revnum = revsplit(rev)[1:]
        if new_module != self.module:
            self.module = new_module
            self.reparent(self.module)

        for i, (path, ent) in enumerate(paths):
            self.ui.progress(_('scanning paths'), i, item=path,
                             total=len(paths), unit=_('paths'))
            entrypath = self.getrelpath(path)

            kind = self._checkpath(entrypath, revnum)
            if kind == svn.core.svn_node_file:
                changed.add(self.recode(entrypath))
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrom_path:
                    continue
                self.ui.debug("copied to %s from %s@%s\n" %
                              (entrypath, copyfrom_path, ent.copyfrom_rev))
                copies[self.recode(entrypath)] = self.recode(copyfrom_path)
            elif kind == 0: # gone, but had better be a deleted *file*
                self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
                pmodule, prevnum = revsplit(parents[0])[1:]
                parentpath = pmodule + "/" + entrypath
                fromkind = self._checkpath(entrypath, prevnum, pmodule)

                if fromkind == svn.core.svn_node_file:
                    removed.add(self.recode(entrypath))
                elif fromkind == svn.core.svn_node_dir:
                    # A whole directory vanished: mark every child file
                    # it contained in the parent revision as removed.
                    oroot = parentpath.strip('/')
                    nroot = path.strip('/')
                    children = self._iterfiles(oroot, prevnum)
                    for childpath in children:
                        childpath = childpath.replace(oroot, nroot)
                        childpath = self.getrelpath("/" + childpath, pmodule)
                        if childpath:
                            removed.add(self.recode(childpath))
                else:
                    self.ui.debug('unknown path in revision %d: %s\n' % \
                                  (revnum, path))
            elif kind == svn.core.svn_node_dir:
                if ent.action == 'M':
                    # If the directory just had a prop change,
                    # then we shouldn't need to look for its children.
                    continue
                if ent.action == 'R' and parents:
                    # If a directory is replacing a file, mark the previous
                    # file as deleted
                    pmodule, prevnum = revsplit(parents[0])[1:]
                    pkind = self._checkpath(entrypath, prevnum, pmodule)
                    if pkind == svn.core.svn_node_file:
                        removed.add(self.recode(entrypath))
                    elif pkind == svn.core.svn_node_dir:
                        # We do not know what files were kept or removed,
                        # mark them all as changed.
                        for childpath in self._iterfiles(pmodule, prevnum):
                            childpath = self.getrelpath("/" + childpath)
                            if childpath:
                                changed.add(self.recode(childpath))

                for childpath in self._iterfiles(path, revnum):
                    childpath = self.getrelpath("/" + childpath)
                    if childpath:
                        changed.add(self.recode(childpath))

                # Handle directory copies
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrompath:
                    continue
                self.ui.debug("mark %s came from %s:%d\n"
                              % (path, copyfrompath, ent.copyfrom_rev))
                children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
                for childpath in children:
                    childpath = self.getrelpath("/" + childpath, pmodule)
                    if not childpath:
                        continue
                    copytopath = path + childpath[len(copyfrompath):]
                    copytopath = self.getrelpath(copytopath)
                    copies[self.recode(copytopath)] = self.recode(childpath)

        self.ui.progress(_('scanning paths'), None)
        changed.update(removed)
        return (list(changed), removed, copies)
837 837
    def _fetch_revisions(self, from_revnum, to_revnum):
        """Fetch the svn log for self.module between the two revision
        numbers (inclusive) and record the resulting changesets in
        self.commits and self.paths.

        The range is normalized so iteration always walks from the higher
        to the lower revision.  Raises error.Abort when the branch has no
        such revision.
        """
        if from_revnum < to_revnum:
            from_revnum, to_revnum = to_revnum, from_revnum

        # link each parsed changeset to the previously parsed (younger) one
        self.child_cset = None

        def parselogentry(orig_paths, revnum, author, date, message):
            """Return the parsed commit object or None, and True if
            the revision is a branch root.
            """
            self.ui.debug("parsing revision %d (%d changes)\n" %
                          (revnum, len(orig_paths)))

            branched = False
            rev = self.revid(revnum)
            # branch log might return entries for a parent we already have

            if rev in self.commits or revnum < to_revnum:
                return None, branched

            parents = []
            # check whether this revision is the start of a branch or part
            # of a branch renaming
            orig_paths = sorted(orig_paths.iteritems())
            root_paths = [(p, e) for p, e in orig_paths
                          if self.module.startswith(p)]
            if root_paths:
                # the longest (closest) enclosing path wins
                path, ent = root_paths[-1]
                if ent.copyfrom_path:
                    branched = True
                    newpath = ent.copyfrom_path + self.module[len(path):]
                    # ent.copyfrom_rev may not be the actual last revision
                    previd = self.latest(newpath, ent.copyfrom_rev)
                    if previd is not None:
                        prevmodule, prevnum = revsplit(previd)[1:]
                        if prevnum >= self.startrev:
                            parents = [previd]
                            self.ui.note(
                                _('found parent of branch %s at %d: %s\n') %
                                (self.module, prevnum, prevmodule))
                else:
                    self.ui.debug("no copyfrom path, don't know what to do.\n")

            paths = []
            # filter out unrelated paths
            for path, ent in orig_paths:
                if self.getrelpath(path) is None:
                    continue
                paths.append((path, ent))

            # Example SVN datetime. Includes microseconds.
            # ISO-8601 conformant
            # '2007-01-04T17:35:00.902377Z'
            date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
            if self.ui.configbool('convert', 'localtimezone'):
                date = makedatetimestamp(date[0])

            if message:
                log = self.recode(message)
            else:
                log = ''

            if author:
                author = self.recode(author)
            else:
                author = ''

            try:
                branch = self.module.split("/")[-1]
                if branch == self.trunkname:
                    branch = None
            except IndexError:
                branch = None

            cset = commit(author=author,
                          date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
                          desc=log,
                          parents=parents,
                          branch=branch,
                          rev=rev)

            self.commits[rev] = cset
            # The parents list is *shared* among self.paths and the
            # commit object. Both will be updated below.
            self.paths[rev] = (paths, cset.parents)
            if self.child_cset and not self.child_cset.parents:
                self.child_cset.parents[:] = [rev]
            self.child_cset = cset
            return cset, branched

        self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
                     (self.module, from_revnum, to_revnum))

        try:
            firstcset = None
            lastonbranch = False
            stream = self._getlog([self.module], from_revnum, to_revnum)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if revnum < self.startrev:
                        lastonbranch = True
                        break
                    if not paths:
                        self.ui.debug('revision %d has no entries\n' % revnum)
                        # If we ever leave the loop on an empty
                        # revision, do not try to get a parent branch
                        lastonbranch = lastonbranch or revnum == 0
                        continue
                    cset, lastonbranch = parselogentry(paths, revnum, author,
                                                       date, message)
                    if cset:
                        firstcset = cset
                    if lastonbranch:
                        break
            finally:
                stream.close()

            if not lastonbranch and firstcset and not firstcset.parents:
                # The first revision of the sequence (the last fetched one)
                # has invalid parents if not a branch root. Find the parent
                # revision now, if any.
                try:
                    firstrevnum = self.revnum(firstcset.rev)
                    if firstrevnum > 1:
                        latest = self.latest(self.module, firstrevnum - 1)
                        if latest:
                            firstcset.parents.append(latest)
                except SvnPathNotFound:
                    pass
        except svn.core.SubversionException as xxx_todo_changeme:
            (inst, num) = xxx_todo_changeme.args
            if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
                raise error.Abort(_('svn: branch has no revision %s')
                                  % to_revnum)
            raise
975 975 def getfile(self, file, rev):
976 976 # TODO: ra.get_file transmits the whole file instead of diffs.
977 977 if file in self.removed:
978 978 return None, None
979 979 mode = ''
980 980 try:
981 981 new_module, revnum = revsplit(rev)[1:]
982 982 if self.module != new_module:
983 983 self.module = new_module
984 984 self.reparent(self.module)
985 985 io = stringio()
986 986 info = svn.ra.get_file(self.ra, file, revnum, io)
987 987 data = io.getvalue()
988 988 # ra.get_file() seems to keep a reference on the input buffer
989 989 # preventing collection. Release it explicitly.
990 990 io.close()
991 991 if isinstance(info, list):
992 992 info = info[-1]
993 993 mode = ("svn:executable" in info) and 'x' or ''
994 994 mode = ("svn:special" in info) and 'l' or mode
995 995 except svn.core.SubversionException as e:
996 996 notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
997 997 svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
998 998 if e.apr_err in notfound: # File not found
999 999 return None, None
1000 1000 raise
1001 1001 if mode == 'l':
1002 1002 link_prefix = "link "
1003 1003 if data.startswith(link_prefix):
1004 1004 data = data[len(link_prefix):]
1005 1005 return data, mode
1006 1006
1007 1007 def _iterfiles(self, path, revnum):
1008 1008 """Enumerate all files in path at revnum, recursively."""
1009 1009 path = path.strip('/')
1010 1010 pool = svn.core.Pool()
1011 1011 rpath = '/'.join([self.baseurl, quote(path)]).strip('/')
1012 1012 entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
1013 1013 if path:
1014 1014 path += '/'
1015 1015 return ((path + p) for p, e in entries.iteritems()
1016 1016 if e.kind == svn.core.svn_node_file)
1017 1017
1018 1018 def getrelpath(self, path, module=None):
1019 1019 if module is None:
1020 1020 module = self.module
1021 1021 # Given the repository url of this wc, say
1022 1022 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
1023 1023 # extract the "entry" portion (a relative path) from what
1024 1024 # svn log --xml says, i.e.
1025 1025 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
1026 1026 # that is to say "tests/PloneTestCase.py"
1027 1027 if path.startswith(module):
1028 1028 relative = path.rstrip('/')[len(module):]
1029 1029 if relative.startswith('/'):
1030 1030 return relative[1:]
1031 1031 elif relative == '':
1032 1032 return relative
1033 1033
1034 1034 # The path is outside our tracked tree...
1035 1035 self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
1036 1036 return None
1037 1037
1038 1038 def _checkpath(self, path, revnum, module=None):
1039 1039 if module is not None:
1040 1040 prevmodule = self.reparent('')
1041 1041 path = module + '/' + path
1042 1042 try:
1043 1043 # ra.check_path does not like leading slashes very much, it leads
1044 1044 # to PROPFIND subversion errors
1045 1045 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
1046 1046 finally:
1047 1047 if module is not None:
1048 1048 self.reparent(prevmodule)
1049 1049
1050 1050 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
1051 1051 strict_node_history=False):
1052 1052 # Normalize path names, svn >= 1.5 only wants paths relative to
1053 1053 # supplied URL
1054 1054 relpaths = []
1055 1055 for p in paths:
1056 1056 if not p.startswith('/'):
1057 1057 p = self.module + '/' + p
1058 1058 relpaths.append(p.strip('/'))
1059 1059 args = [self.baseurl, relpaths, start, end, limit,
1060 1060 discover_changed_paths, strict_node_history]
1061 1061 # developer config: convert.svn.debugsvnlog
1062 1062 if not self.ui.configbool('convert', 'svn.debugsvnlog', True):
1063 1063 return directlogstream(*args)
1064 1064 arg = encodeargs(args)
1065 1065 hgexe = util.hgexecutable()
1066 1066 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
1067 1067 stdin, stdout = util.popen2(util.quotecommand(cmd))
1068 1068 stdin.write(arg)
1069 1069 try:
1070 1070 stdin.close()
1071 1071 except IOError:
1072 1072 raise error.Abort(_('Mercurial failed to run itself, check'
1073 1073 ' hg executable is in PATH'))
1074 1074 return logstream(stdout)
1075 1075
# Shell hook installed into freshly created target repositories: Subversion
# forbids revision-property changes by default, but the conversion needs to
# set svn:log and the hg:convert-* tracking properties.
pre_revprop_change = '''#!/bin/sh

REPOS="$1"
REV="$2"
USER="$3"
PROPNAME="$4"
ACTION="$5"

if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi

echo "Changing prohibited revision property" >&2
exit 1
'''
class svn_sink(converter_sink, commandline):
    """Conversion sink writing changesets into a Subversion repository
    through a working copy driven with the `svn` command line client."""
    commit_re = re.compile(r'Committed revision (\d+).', re.M)
    uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M)

    def prerun(self):
        # the run0/run/xargs helpers expect the cwd to be the working copy
        if self.wc:
            os.chdir(self.wc)

    def postrun(self):
        if self.wc:
            os.chdir(self.cwd)

    def join(self, name):
        # conversion metadata lives inside the working copy's .svn directory
        return os.path.join(self.wc, '.svn', name)

    def revmapfile(self):
        return self.join('hg-shamap')

    def authorfile(self):
        return self.join('hg-authormap')

    def __init__(self, ui, path):
        # `path` may be an existing working copy, an svn URL, or a local
        # directory in which a brand new repository should be created.

        converter_sink.__init__(self, ui, path)
        commandline.__init__(self, ui, 'svn')
        self.delete = []        # files to `svn delete` at next commit
        self.setexec = []       # files gaining svn:executable
        self.delexec = []       # files losing svn:executable
        self.copies = []        # (source, dest) pairs to `svn copy`
        self.wc = None
        self.cwd = pycompat.getcwd()

        created = False
        if os.path.isfile(os.path.join(path, '.svn', 'entries')):
            # existing working copy: just bring it up to date
            self.wc = os.path.realpath(path)
            self.run0('update')
        else:
            if not re.search(r'^(file|http|https|svn|svn\+ssh)\://', path):
                path = os.path.realpath(path)
                if os.path.isdir(os.path.dirname(path)):
                    if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
                        ui.status(_('initializing svn repository %r\n') %
                                  os.path.basename(path))
                        commandline(ui, 'svnadmin').run0('create', path)
                        created = path
                    path = util.normpath(path)
                    if not path.startswith('/'):
                        path = '/' + path
                    path = 'file://' + path

            wcpath = os.path.join(pycompat.getcwd(), os.path.basename(path) +
                                '-wc')
            ui.status(_('initializing svn working copy %r\n')
                      % os.path.basename(wcpath))
            self.run0('checkout', path, wcpath)

            self.wc = wcpath
        self.opener = scmutil.vfs(self.wc)
        self.wopener = scmutil.vfs(self.wc)
        self.childmap = mapfile(ui, self.join('hg-childmap'))
        # is_exec is None when the filesystem does not support the
        # executable bit (exec flags are then tracked via svn props only)
        if util.checkexec(self.wc):
            self.is_exec = util.isexec
        else:
            self.is_exec = None

        if created:
            # allow the conversion to set revision properties (see
            # pre_revprop_change above)
            hook = os.path.join(created, 'hooks', 'pre-revprop-change')
            fp = open(hook, 'w')
            fp.write(pre_revprop_change)
            fp.close()
            util.setflags(hook, False, True)

        output = self.run0('info')
        self.uuid = self.uuid_re.search(output).group(1).strip()

    def wjoin(self, *names):
        return os.path.join(self.wc, *names)

    @propertycache
    def manifest(self):
        # As of svn 1.7, the "add" command fails when receiving
        # already tracked entries, so we have to track and filter them
        # ourselves.
        m = set()
        output = self.run0('ls', recursive=True, xml=True)
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            for n in e.childNodes:
                if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
                    continue
                name = ''.join(c.data for c in n.childNodes
                               if c.nodeType == c.TEXT_NODE)
                # Entries are compared with names coming from
                # mercurial, so bytes with undefined encoding. Our
                # best bet is to assume they are in local
                # encoding. They will be passed to command line calls
                # later anyway, so they better be.
                m.add(encoding.tolocal(name.encode('utf-8')))
                break
        return m

    def putfile(self, filename, flags, data):
        """Write `data` to `filename` in the working copy, honoring the
        'l' (symlink) and 'x' (executable) flags."""
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                # replace a stale symlink before writing through it
                if os.path.islink(self.wjoin(filename)):
                    os.unlink(filename)
            except OSError:
                pass
            self.wopener.write(filename, data)

            if self.is_exec:
                if self.is_exec(self.wjoin(filename)):
                    if 'x' not in flags:
                        self.delexec.append(filename)
                else:
                    if 'x' in flags:
                        self.setexec.append(filename)
                util.setflags(self.wjoin(filename), False, 'x' in flags)

    def _copyfile(self, source, dest):
        # SVN's copy command pukes if the destination file exists, but
        # our copyfile method expects to record a copy that has
        # already occurred. Cross the semantic gap.
        wdest = self.wjoin(dest)
        exists = os.path.lexists(wdest)
        if exists:
            fd, tempname = tempfile.mkstemp(
                prefix='hg-copy-', dir=os.path.dirname(wdest))
            os.close(fd)
            os.unlink(tempname)
            os.rename(wdest, tempname)
        try:
            self.run0('copy', source, dest)
        finally:
            self.manifest.add(dest)
            if exists:
                try:
                    os.unlink(wdest)
                except OSError:
                    pass
                os.rename(tempname, wdest)

    def dirs_of(self, files):
        """Return the set of all directories containing any of `files`,
        including every intermediate parent directory."""
        dirs = set()
        for f in files:
            if os.path.isdir(self.wjoin(f)):
                dirs.add(f)
            i = len(f)
            for i in iter(lambda: f.rfind('/', 0, i), -1):
                dirs.add(f[:i])
        return dirs

    def add_dirs(self, files):
        """`svn add` the not-yet-tracked parent directories of `files`."""
        add_dirs = [d for d in sorted(self.dirs_of(files))
                    if d not in self.manifest]
        if add_dirs:
            self.manifest.update(add_dirs)
            self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
        return add_dirs

    def add_files(self, files):
        """`svn add` the subset of `files` not already tracked."""
        files = [f for f in files if f not in self.manifest]
        if files:
            self.manifest.update(files)
            self.xargs(files, 'add', quiet=True)
        return files

    def addchild(self, parent, child):
        self.childmap[parent] = child

    def revid(self, rev):
        return u"svn:%s@%s" % (self.uuid, rev)

    def putcommit(self, files, copies, parents, commit, source, revmap, full,
                  cleanp2):
        """Apply `files` to the working copy and commit them, returning the
        sink revision id.  If any parent was already converted, its child
        revision is reused instead of committing again."""
        for parent in parents:
            try:
                return self.revid(self.childmap[parent])
            except KeyError:
                pass

        # Apply changes to working copy
        for f, v in files:
            data, mode = source.getfile(f, v)
            if data is None:
                self.delete.append(f)
            else:
                self.putfile(f, mode, data)
                if f in copies:
                    self.copies.append([copies[f], f])
        if full:
            self.delete.extend(sorted(self.manifest.difference(files)))
        files = [f[0] for f in files]

        entries = set(self.delete)
        files = frozenset(files)
        entries.update(self.add_dirs(files.difference(entries)))
        if self.copies:
            for s, d in self.copies:
                self._copyfile(s, d)
            self.copies = []
        if self.delete:
            self.xargs(self.delete, 'delete')
            for f in self.delete:
                self.manifest.remove(f)
            self.delete = []
        entries.update(self.add_files(files.difference(entries)))
        if self.delexec:
            self.xargs(self.delexec, 'propdel', 'svn:executable')
            self.delexec = []
        if self.setexec:
            self.xargs(self.setexec, 'propset', 'svn:executable', '*')
            self.setexec = []

        # the commit message is passed through a temporary file to avoid
        # shell quoting issues
        fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
        fp = os.fdopen(fd, pycompat.sysstr('w'))
        fp.write(commit.desc)
        fp.close()
        try:
            output = self.run0('commit',
                               username=util.shortuser(commit.author),
                               file=messagefile,
                               encoding='utf-8')
            try:
                rev = self.commit_re.search(output).group(1)
            except AttributeError:
                # no "Committed revision" in the output: nothing to commit
                if parents and not files:
                    return parents[0]
                self.ui.warn(_('unexpected svn output:\n'))
                self.ui.warn(output)
                raise error.Abort(_('unable to cope with svn output'))
            if commit.rev:
                self.run('propset', 'hg:convert-rev', commit.rev,
                         revprop=True, revision=rev)
            if commit.branch and commit.branch != 'default':
                self.run('propset', 'hg:convert-branch', commit.branch,
                         revprop=True, revision=rev)
            for parent in parents:
                self.addchild(parent, rev)
            return self.revid(rev)
        finally:
            os.unlink(messagefile)

    def puttags(self, tags):
        self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
        return None, None

    def hascommitfrommap(self, rev):
        # We trust that revisions referenced in a map still is present
        # TODO: implement something better if necessary and feasible
        return True

    def hascommitforsplicemap(self, rev):
        # This is not correct as one can convert to an existing subversion
        # repository and childmap would not list all revisions. Too bad.
        if rev in self.childmap:
            return True
        raise error.Abort(_('splice map revision %s not found in subversion '
                            'child map (revision lookups are not implemented)')
                          % rev)
@@ -1,666 +1,666 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 15 import platform
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 encoding,
23 23 error,
24 24 httpconnection,
25 25 match as matchmod,
26 26 node,
27 27 pycompat,
28 28 scmutil,
29 29 util,
30 30 )
31 31
32 32 shortname = '.hglf'
33 33 shortnameslash = shortname + '/'
34 34 longname = 'largefiles'
35 35
36 36 # -- Private worker functions ------------------------------------------
37 37
def getminsize(ui, assumelfiles, opt, default=10):
    """Resolve the minimum largefile size (in MB) from the command-line
    value `opt`, falling back to the `largefiles.minsize` config entry
    when `assumelfiles` is set.  Aborts when no size is available or the
    value is not numeric."""
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlinking failed (cross-device, unsupported fs, ...):
        # fall back on an atomic copy preserving the source's mode
        with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    # the cache file is simply named after the largefile's sha1 hash
    return os.path.join(_usercachedir(ui), hash)
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.'''
    # an explicit config setting always wins
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return configured
    env = encoding.environ
    if pycompat.osname == 'nt':
        appdata = env.get('LOCALAPPDATA', env.get('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = env.get('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif pycompat.osname == 'posix':
        xdgcache = env.get('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname)
        home = env.get('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n')
                          % pycompat.osname)
    raise error.Abort(_('unknown %s usercache location') % longname)
def inusercache(ui, hash):
    """Return True when the largefile `hash` exists in the user cache."""
    return os.path.exists(usercachepath(ui, hash))
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        stored = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), stored)
        return stored
    return None
class largefilesdirstate(dirstate.dirstate):
    """dirstate subclass tracking largefiles: every path is normalized to
    slash-separated (unix) form before being handed to the base class."""
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        # (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = scmutil.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if standins:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfdirstate.normallookup(splitstandin(standin))
    return lfdirstate
def lfdirstatestatus(lfdirstate, repo):
    """Resolve the `unsure` entries of lfdirstate's status against the
    working context, reclassifying each as modified or clean, and return
    the updated status object."""
    wctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        # a largefile is clean iff its standin's recorded hash matches the
        # hash of the file currently in the working directory
        if (fctx is not None
                and fctx.data().strip() == hashfile(repo.wjoin(lfile))):
            clean.append(lfile)
            lfdirstate.normal(lfile)
        else:
            modified.append(lfile)
    return s
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    lfiles = []
    for f in repo[rev].walk(matcher):
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    # forcelocal bypasses the share-source store (see storepath)
    return os.path.exists(storepath(repo, hash, forcelocal))
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    # shared repositories keep their primary store in the share source
    if forcelocal or not repo.shared():
        return repo.join(longname, hash)
    return repo.vfs.reljoin(repo.sharedpath, longname, hash)
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primary = storepath(repo, hash, False)
    if instore(repo, hash):
        return primary, True
    if repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True
    return primary, False
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # remove the corrupted copy so it is not mistaken for a valid one
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
def copytostore(repo, rev, file, uploaded=False):
    """Copy the largefile for standin `file` at `rev` from the working
    directory into the local store, unless it is already there."""
    wvfs = repo.wvfs
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    if not wvfs.exists(file):
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
        return
    copytostoreabsolute(repo, wvfs.join(file), hash)
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        if not isstandin(filename) or filename not in ctx.manifest():
            continue
        copytostore(repo, ctx.node(), splitstandin(filename))
def copytostoreabsolute(repo, file, hash):
    """Copy the file at absolute path `file` into the store under `hash`,
    hardlinking from the user cache when possible; otherwise copy it in
    atomically and then publish the store copy to the user cache."""
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
        return
    dst = storepath(repo, hash)
    util.makedirs(os.path.dirname(dst))
    with open(file, 'rb') as srcf:
        with util.atomictempfile(dst,
                                 createmode=repo.store.createmode) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
    linktousercache(repo, hash)
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    def badfn(f, msg):
        return None

    if not rmatcher or rmatcher.always():
        # no patterns: relative to repo root
        return scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)

    pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
    if not pats:
        pats = [wvfs.join(standindir)]
    match = scmutil.match(repo[None], pats, badfn=badfn)
    # if pats is empty, it would incorrectly always match, so clear _always
    match._always = False
    return match
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    # NOTE: this local deliberately shadows the module-level isstandin();
    # it captures the standin matcher's original matchfn before we wrap it.
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # standins all live under the directory named by ``shortname``
    return filename.startswith(shortnameslash)
333 333
def splitstandin(filename):
    '''Return the largefile name for the given standin path, or None when
    ``filename`` is not a standin.

    Splits on '/' because that is what dirstate always uses, even on
    Windows; local separators are normalized first in case the name came
    from an external source (like the command line).'''
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) != 2 or parts[0] != shortname:
        return None
    return parts[1]
343 343
def updatestandin(repo, standin):
    '''Refresh the given standin from the current working-directory
    contents of its largefile (hash and executable bit).

    Raises error.Abort if the largefile is missing from the working
    directory.'''
    # compute the largefile name once instead of re-splitting the standin
    # path for every use
    lfile = splitstandin(standin)
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % lfile)
352 352
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # repo[None] is the working-directory context
    return repo[node][standin(filename)].data().strip()
357 357
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # standin contents are the hash plus a newline; 'x' sets the exec bit
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
361 361
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash
    as a hex digest string.'''
    # sha1() starts empty; the previous '' seed argument was redundant
    # (and is a TypeError on Python 3)
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
370 370
def hashrepofile(repo, file):
    '''Return the hex SHA-1 digest of the repo-relative ``file`` in the
    working directory (empty string if it does not exist).'''
    return hashfile(repo.wjoin(file))
373 373
def hashfile(file):
    '''Return the hex SHA-1 digest of the file at path ``file``, or the
    empty string if the file does not exist.'''
    if not os.path.exists(file):
        return ''
    # sha1() starts empty; the previous '' seed argument was redundant
    # (and is a TypeError on Python 3)
    hasher = hashlib.sha1()
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd):
            hasher.update(data)
    return hasher.hexdigest()
382 382
def getexecutable(filename):
    '''Return a truthy value iff user, group and other all have the
    execute bit set on ``filename``.'''
    mode = os.stat(filename).st_mode
    # check each execute bit in turn; a missing bit masks to 0 (falsy),
    # matching the short-circuit behavior of the equivalent 'and' chain
    for bit in (stat.S_IXUSR, stat.S_IXGRP):
        masked = mode & bit
        if not masked:
            return masked
    return mode & stat.S_IXOTH
388 388
def urljoin(first, second, *arg):
    '''Join URL path components with exactly one '/' between each pair.'''
    def _pair(left, right):
        # ensure a single slash at the seam regardless of what the two
        # sides already carry
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    result = _pair(first, second)
    for piece in arg:
        result = _pair(result, piece)
    return result
401 401
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    # consume the file-like object in chunks via util.filechunkiter
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
409 409
def httpsendfile(ui, filename):
    '''Wrap ``filename`` (opened binary read-only) for HTTP upload.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
412 412
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # collapse '.'/'..'/duplicate separators, then force '/' separators
    return util.pconvert(os.path.normpath(path))
416 416
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    # fast path: the requirement is recorded and at least one standin is
    # present in the store
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # otherwise, any entry in the largefiles dirstate counts
    return any(openlfdirstate(repo.ui, repo, False))
424 424
class storeprotonotcapable(Exception):
    '''Exception carrying the largefile store protocol types that could
    not be satisfied (``storetypes``).'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
428 428
def getstandinsstate(repo):
    '''Return a list of (largefile, hash) pairs for every standin tracked
    by the dirstate; hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
440 440
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Propagate the dirstate state of lfile's standin into the largefiles
    dirstate (lfdirstate).

    If ``normallookup`` is true, a clean ('n') standin is still recorded
    as "needs lookup" instead of clean.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # standin is not tracked at all
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
463 463
def markcommitted(orig, ctx, node):
    '''Wrapper around the original markcommitted (``orig``): after the
    normal processing, sync the largefiles dirstate for every standin
    touched by ``ctx`` and copy all largefiles of ``node`` to the store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
487 487
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the file names whose (name, hash) standin entries differ
    between the two lists, without duplicates.

    Both arguments are lists of (filename, hash) pairs as produced by
    getstandinsstate().'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    seen = set()
    for f in changedstandins:
        # dedupe with a set instead of an O(n) list-membership test
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
495 495
def getlfilestoupload(repo, missing, addfunc):
    '''For each revision in ``missing``, call ``addfunc(standin, hash)``
    for every standin file the revision touches.'''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        # read the changectx with largefiles processing disabled so we see
        # the raw standins
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, ctx.files() is not enough: also include files
            # that differ from either parent manifest
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
527 527
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if repo.wvfs.exists(standin(lfile)):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # largefile names are handled via their standins; anything the
        # original matcher accepted matches unless it is a largefile
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
629 629
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        # True until the first commit after resuming is processed
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match
650 650
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status # forcibly WRITE OUT

    def _ignore(*msg, **opts):
        # forcibly IGNORE: swallow all status output
        return None
    return _ignore
@@ -1,3611 +1,3611 b''
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help COMMAND` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 files creations or deletions. This behavior can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 make them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60
61 61 This extension used to provide a strip command. This command now lives
62 62 in the strip extension.
63 63 '''
64 64
65 65 from __future__ import absolute_import
66 66
67 67 import errno
68 68 import os
69 69 import re
70 70 import shutil
71 71 from mercurial.i18n import _
72 72 from mercurial.node import (
73 73 bin,
74 74 hex,
75 75 nullid,
76 76 nullrev,
77 77 short,
78 78 )
79 79 from mercurial import (
80 80 cmdutil,
81 81 commands,
82 82 dirstateguard,
83 83 error,
84 84 extensions,
85 85 hg,
86 86 localrepo,
87 87 lock as lockmod,
88 88 patch as patchmod,
89 89 phases,
90 90 pycompat,
91 91 registrar,
92 92 revsetlang,
93 93 scmutil,
94 94 smartset,
95 95 subrepo,
96 96 util,
97 97 )
98 98
99 99 release = lockmod.release
100 100 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
101 101
102 102 cmdtable = {}
103 103 command = cmdutil.command(cmdtable)
104 104 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
105 105 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
106 106 # be specifying the version(s) of Mercurial they are tested with, or
107 107 # leave the attribute unspecified.
108 108 testedwith = 'ships-with-hg-core'
109 109
110 110 # force load strip extension formerly included in mq and import some utility
111 111 try:
112 112 stripext = extensions.find('strip')
113 113 except KeyError:
114 114 # note: load is lazy so we could avoid the try-except,
115 115 # but I (marmoute) prefer this explicit code.
116 116 class dummyui(object):
117 117 def debug(self, msg):
118 118 pass
119 119 stripext = extensions.load(dummyui(), 'strip', '')
120 120
121 121 strip = stripext.strip
122 122 checksubstate = stripext.checksubstate
123 123 checklocalchanges = stripext.checklocalchanges
124 124
125 125
126 126 # Patch names looks like unix-file names.
127 127 # They must be joinable with queue directory and result in the patch path.
128 128 normname = util.normpath
129 129
130 130 class statusentry(object):
131 131 def __init__(self, node, name):
132 132 self.node, self.name = node, name
133 133 def __repr__(self):
134 134 return hex(self.node) + ':' + self.name
135 135
136 136 # The order of the headers in 'hg export' HG patches:
137 137 HGHEADERS = [
138 138 # '# HG changeset patch',
139 139 '# User ',
140 140 '# Date ',
141 141 '# ',
142 142 '# Branch ',
143 143 '# Node ID ',
144 144 '# Parent ', # can occur twice for merges - but that is not relevant for mq
145 145 ]
146 146 # The order of headers in plain 'mail style' patches:
147 147 PLAINHEADERS = {
148 148 'from': 0,
149 149 'date': 1,
150 150 'subject': 2,
151 151 }
152 152
def inserthgheader(lines, header, value):
    """Assuming lines contains a HG patch header, add a header line with value.
    >>> try: inserthgheader([], '# Date ', 'z')
    ... except ValueError, inst: print "oops"
    oops
    >>> inserthgheader(['# HG changeset patch'], '# Date ', 'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader(['# HG changeset patch', ''], '# Date ', 'z')
    ['# HG changeset patch', '# Date z', '']
    >>> inserthgheader(['# HG changeset patch', '# User y'], '# Date ', 'z')
    ['# HG changeset patch', '# User y', '# Date z']
    >>> inserthgheader(['# HG changeset patch', '# Date x', '# User y'],
    ...                '# User ', 'z')
    ['# HG changeset patch', '# Date x', '# User z']
    >>> inserthgheader(['# HG changeset patch', '# Date y'], '# Date ', 'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader(['# HG changeset patch', '', '# Date y'], '# Date ', 'z')
    ['# HG changeset patch', '# Date z', '', '# Date y']
    >>> inserthgheader(['# HG changeset patch', '# Parent  y'], '# Date ', 'z')
    ['# HG changeset patch', '# Date z', '# Parent  y']
    """
    # raises ValueError if the '# HG changeset patch' marker is missing,
    # as the first doctest shows
    start = lines.index('# HG changeset patch') + 1
    newindex = HGHEADERS.index(header)
    bestpos = len(lines)
    for i in range(start, len(lines)):
        line = lines[i]
        if not line.startswith('# '):
            # end of the header block; insert no later than here
            bestpos = min(bestpos, i)
            break
        for lineindex, h in enumerate(HGHEADERS):
            if line.startswith(h):
                if lineindex == newindex:
                    # header already present: replace it in place
                    lines[i] = header + value
                    return lines
                if lineindex > newindex:
                    # this header sorts after ours: insert before it
                    bestpos = min(bestpos, i)
                break # next line
    lines.insert(bestpos, header + value)
    return lines
192 192
def insertplainheader(lines, header, value):
    """For lines containing a plain patch header, add a header line with value.
    >>> insertplainheader([], 'Date', 'z')
    ['Date: z']
    >>> insertplainheader([''], 'Date', 'z')
    ['Date: z', '']
    >>> insertplainheader(['x'], 'Date', 'z')
    ['Date: z', '', 'x']
    >>> insertplainheader(['From: y', 'x'], 'Date', 'z')
    ['From: y', 'Date: z', '', 'x']
    >>> insertplainheader([' date : x', ' from : y', ''], 'From', 'z')
    [' date : x', 'From: z', '']
    >>> insertplainheader(['', 'Date: y'], 'Date', 'z')
    ['Date: z', '', 'Date: y']
    >>> insertplainheader(['foo: bar', 'DATE: z', 'x'], 'From', 'y')
    ['From: y', 'foo: bar', 'DATE: z', '', 'x']
    """
    # headers are kept in PLAINHEADERS priority order (from < date < subject)
    newprio = PLAINHEADERS[header.lower()]
    bestpos = len(lines)
    for i, line in enumerate(lines):
        if ':' in line:
            lheader = line.split(':', 1)[0].strip().lower()
            lprio = PLAINHEADERS.get(lheader, newprio + 1)
            if lprio == newprio:
                # same header already present: replace it in place
                lines[i] = '%s: %s' % (header, value)
                return lines
            if lprio > newprio and i < bestpos:
                bestpos = i
        else:
            # first non-header line ends the header block; make sure it is
            # separated from the headers by a blank line
            if line:
                lines.insert(i, '')
            if i < bestpos:
                bestpos = i
            break
    lines.insert(bestpos, '%s: %s' % (header, value))
    return lines
229 229
class patchheader(object):
    '''Parsed header (user, date, parent, message, ...) of a patch file.

    ``pf`` is the path of the patch file; ``plainmode`` forces plain
    (mail-style) headers instead of '# HG changeset patch' headers.'''
    def __init__(self, pf, plainmode=False):
        def eatdiff(lines):
            # drop trailing diff-introducing lines
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None       # parser state: None / "hgpatch" / "tag" / "tagdone"
        subject = None
        branch = None
        nodeid = None
        diffstart = 0       # 0: not seen, 1: maybe ('--- '), 2: confirmed

        for line in file(pf):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                # start of the actual diff: header parsing is done
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:].lstrip() # handle double trailing space
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
                comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = (plainmode or
                          '# HG changeset patch' not in self.comments and
                          any(c.startswith('Date: ') or
                              c.startswith('From: ')
                              for c in self.comments))

    def setuser(self, user):
        # update (or insert) the user header and remember the value
        try:
            inserthgheader(self.comments, '# User ', user)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, 'From', user)
            else:
                tmp = ['# HG changeset patch', '# User ' + user]
                self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        # update (or insert) the date header and remember the value
        try:
            inserthgheader(self.comments, '# Date ', date)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, 'Date', date)
            else:
                tmp = ['# HG changeset patch', '# Date ' + date]
                self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        # update (or insert) the parent header; plain patches carry no
        # parent header at all
        try:
            inserthgheader(self.comments, '# Parent  ', parent)
        except ValueError:
            if not self.plainmode:
                tmp = ['# HG changeset patch', '# Parent  ' + parent]
                self.comments = tmp + self.comments
        self.parent = parent

    def setmessage(self, message):
        # replace the free-form commit message, keeping the header fields
        if self.comments:
            self._delmsg()
        self.message = [message]
        if message:
            if self.plainmode and self.comments and self.comments[-1]:
                self.comments.append('')
            self.comments.append(message)

    def __str__(self):
        s = '\n'.join(self.comments).rstrip()
        if not s:
            return ''
        return s + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
396 396
def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensure a commit respect mq.secret setting

    It should be used instead of repo.commit inside the mq source for operation
    creating new changeset.
    """
    repo = repo.unfiltered()
    if phase is None:
        # deduce the phase from the configuration: secret iff mq.secret is set
        if repo.ui.configbool('mq', 'secret', False):
            phase = phases.secret
    if phase is not None:
        phasebackup = repo.ui.backupconfig('phases', 'new-commit')
    allowemptybackup = repo.ui.backupconfig('ui', 'allowemptycommit')
    try:
        if phase is not None:
            repo.ui.setconfig('phases', 'new-commit', phase, 'mq')
        repo.ui.setconfig('ui', 'allowemptycommit', True)
        return repo.commit(*args, **kwargs)
    finally:
        # restore the temporarily overridden configuration
        repo.ui.restoreconfig(allowemptybackup)
        if phase is not None:
            repo.ui.restoreconfig(phasebackup)
419 419
class AbortNoCleanup(error.Abort):
    # NOTE(review): name suggests callers catching this skip cleanup —
    # verify at the raise/except sites
    pass
422 422
423 423 class queue(object):
    def __init__(self, ui, baseui, path, patchdir=None):
        '''Initialize a patch queue rooted at ``path``.

        The active queue directory is read from the 'patches.queue' file
        when present; ``patchdir`` overrides it.'''
        self.basepath = path
        try:
            fh = open(os.path.join(path, 'patches.queue'))
            cur = fh.read().rstrip()
            fh.close()
            if not cur:
                curpath = os.path.join(path, 'patches')
            else:
                curpath = os.path.join(path, 'patches-' + cur)
        except IOError:
            # no patches.queue file: fall back to the default directory
            curpath = os.path.join(path, 'patches')
        self.path = patchdir or curpath
        self.opener = scmutil.vfs(self.path)
        self.ui = ui
        self.baseui = baseui
        self.applieddirty = False
        self.seriesdirty = False
        self.added = []
        self.seriespath = "series"
        self.statuspath = "status"
        self.guardspath = "guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
        try:
            gitmode = ui.configbool('mq', 'git', None)
            if gitmode is None:
                raise error.ConfigError
            if gitmode:
                self.gitmode = 'yes'
            else:
                self.gitmode = 'no'
        except error.ConfigError:
            # let's have check-config ignore the type mismatch
            self.gitmode = ui.config(r'mq', 'git', 'auto').lower()
        # deprecated config: mq.plain
        self.plainmode = ui.configbool('mq', 'plain', False)
        self.checkapplied = True
    @util.propertycache
    def applied(self):
        '''List of statusentry objects parsed from the status file.'''
        def parselines(lines):
            for l in lines:
                entry = l.split(':', 1)
                if len(entry) > 1:
                    n, name = entry
                    yield statusentry(bin(n), name)
                elif l.strip():
                    self.ui.warn(_('malformated mq status line: %s\n') % entry)
                # else we ignore empty lines
        try:
            lines = self.opener.read(self.statuspath).splitlines()
            return list(parselines(lines))
        except IOError as e:
            if e.errno == errno.ENOENT:
                # missing status file means no patches applied
                return []
            raise
482 482
    @util.propertycache
    def fullseries(self):
        '''Raw lines of the series file, including comments and guards.'''
        try:
            return self.opener.read(self.seriespath).splitlines()
        except IOError as e:
            if e.errno == errno.ENOENT:
                # missing series file means an empty series
                return []
            raise
491 491
    @util.propertycache
    def series(self):
        '''Patch names from the series file, comments and guards stripped.'''
        # parseseries assigns self.series, which shadows this propertycache
        self.parseseries()
        return self.series
496 496
    @util.propertycache
    def seriesguards(self):
        '''Per-patch list of guards parsed from the series file.'''
        # parseseries assigns self.seriesguards, shadowing this propertycache
        self.parseseries()
        return self.seriesguards
501 501
    def invalidate(self):
        '''Drop all cached state so it is re-read from disk on next access.'''
        for a in 'applied fullseries series seriesguards'.split():
            if a in self.__dict__:
                delattr(self, a)
        self.applieddirty = False
        self.seriesdirty = False
        self.guardsdirty = False
        self.activeguards = None
510 510
    def diffopts(self, opts=None, patchfn=None):
        '''Return diff options honouring the mq.git configuration; when
        ``patchfn`` is given and gitmode is 'keep', preserve its git-ness.'''
        diffopts = patchmod.diffopts(self.ui, opts)
        if self.gitmode == 'auto':
            diffopts.upgrade = True
        elif self.gitmode == 'keep':
            pass
        elif self.gitmode in ('yes', 'no'):
            diffopts.git = self.gitmode == 'yes'
        else:
            raise error.Abort(_('mq.git option can be auto/keep/yes/no'
                                ' got %s') % self.gitmode)
        if patchfn:
            diffopts = self.patchopts(diffopts, patchfn)
        return diffopts
525 525
    def patchopts(self, diffopts, *patches):
        """Return a copy of input diff options with git set to true if
        referenced patch is a git patch and should be preserved as such.
        """
        diffopts = diffopts.copy()
        if not diffopts.git and self.gitmode == 'keep':
            for patchfn in patches:
                patchf = self.opener(patchfn, 'r')
                # if the patch was a git patch, refresh it as a git patch
                for line in patchf:
                    if line.startswith('diff --git'):
                        diffopts.git = True
                        break
                patchf.close()
        return diffopts
541 541
    def join(self, *p):
        '''Join path components below the queue directory.'''
        return os.path.join(self.path, *p)
544 544
545 545 def findseries(self, patch):
546 546 def matchpatch(l):
547 547 l = l.split('#', 1)[0]
548 548 return l.strip() == patch
549 549 for index, l in enumerate(self.fullseries):
550 550 if matchpatch(l):
551 551 return index
552 552 return None
553 553
554 554 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
555 555
556 556 def parseseries(self):
557 557 self.series = []
558 558 self.seriesguards = []
559 559 for l in self.fullseries:
560 560 h = l.find('#')
561 561 if h == -1:
562 562 patch = l
563 563 comment = ''
564 564 elif h == 0:
565 565 continue
566 566 else:
567 567 patch = l[:h]
568 568 comment = l[h:]
569 569 patch = patch.strip()
570 570 if patch:
571 571 if patch in self.series:
572 572 raise error.Abort(_('%s appears more than once in %s') %
573 573 (patch, self.join(self.seriespath)))
574 574 self.series.append(patch)
575 575 self.seriesguards.append(self.guard_re.findall(comment))
576 576
577 577 def checkguard(self, guard):
578 578 if not guard:
579 579 return _('guard cannot be an empty string')
580 580 bad_chars = '# \t\r\n\f'
581 581 first = guard[0]
582 582 if first in '-+':
583 583 return (_('guard %r starts with invalid character: %r') %
584 584 (guard, first))
585 585 for c in bad_chars:
586 586 if c in guard:
587 587 return _('invalid character in guard %r: %r') % (guard, c)
588 588
589 589 def setactive(self, guards):
590 590 for guard in guards:
591 591 bad = self.checkguard(guard)
592 592 if bad:
593 593 raise error.Abort(bad)
594 594 guards = sorted(set(guards))
595 595 self.ui.debug('active guards: %s\n' % ' '.join(guards))
596 596 self.activeguards = guards
597 597 self.guardsdirty = True
598 598
    def active(self):
        """Return the list of currently active guard names.

        Lazily loaded from the guards file on first call; a missing
        file means no active guards, and invalid names found in the
        file are warned about and skipped.
        """
        if self.activeguards is None:
            self.activeguards = []
            try:
                guards = self.opener.read(self.guardspath).split()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # no guards file yet: nothing active
                guards = []
            for i, guard in enumerate(guards):
                bad = self.checkguard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guardspath), i + 1, bad))
                else:
                    self.activeguards.append(guard)
        return self.activeguards
616 616
617 617 def setguards(self, idx, guards):
618 618 for g in guards:
619 619 if len(g) < 2:
620 620 raise error.Abort(_('guard %r too short') % g)
621 621 if g[0] not in '-+':
622 622 raise error.Abort(_('guard %r starts with invalid char') % g)
623 623 bad = self.checkguard(g[1:])
624 624 if bad:
625 625 raise error.Abort(bad)
626 626 drop = self.guard_re.sub('', self.fullseries[idx])
627 627 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
628 628 self.parseseries()
629 629 self.seriesdirty = True
630 630
631 631 def pushable(self, idx):
632 632 if isinstance(idx, str):
633 633 idx = self.series.index(idx)
634 634 patchguards = self.seriesguards[idx]
635 635 if not patchguards:
636 636 return True, None
637 637 guards = self.active()
638 638 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
639 639 if exactneg:
640 640 return False, repr(exactneg[0])
641 641 pos = [g for g in patchguards if g[0] == '+']
642 642 exactpos = [g for g in pos if g[1:] in guards]
643 643 if pos:
644 644 if exactpos:
645 645 return True, repr(exactpos[0])
646 646 return False, ' '.join(map(repr, pos))
647 647 return True, ''
648 648
    def explainpushable(self, idx, all_patches=False):
        """Report why the patch at idx (name or index) is skipped or
        allowed.

        With all_patches=False, only warns about skipped patches and
        only in verbose mode; with all_patches=True, writes a line for
        every patch including allowed ones.
        """
        if all_patches:
            write = self.ui.write
        else:
            write = self.ui.warn

        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                # why is None: no guards at all; '': no negative guard
                # matched; otherwise: the positive guard that matched
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %s\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %s\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
677 677
    def savedirty(self):
        """Flush modified queue state to disk.

        Rewrites the status, series and guards files for whichever
        dirty flags are set, and registers newly created patch files
        with the patch repository (if one exists).
        """
        def writelist(items, path):
            # rewrite path with one entry per line
            fp = self.opener(path, 'w')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applieddirty:
            writelist(map(str, self.applied), self.statuspath)
            self.applieddirty = False
        if self.seriesdirty:
            writelist(self.fullseries, self.seriespath)
            self.seriesdirty = False
        if self.guardsdirty:
            writelist(self.activeguards, self.guardspath)
            self.guardsdirty = False
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []
698 698
699 699 def removeundo(self, repo):
700 700 undo = repo.sjoin('undo')
701 701 if not os.path.exists(undo):
702 702 return
703 703 try:
704 704 os.unlink(undo)
705 705 except OSError as inst:
706 706 self.ui.warn(_('error removing undo: %s\n') % str(inst))
707 707
    def backup(self, repo, files, copy=False):
        """Preserve working-directory versions of files as .orig paths.

        copy=True duplicates each file (the original stays in place);
        otherwise the file is renamed away.
        """
        # backup local changes in --force case
        for f in sorted(files):
            absf = repo.wjoin(f)
            if os.path.lexists(absf):
                self.ui.note(_('saving current version of %s as %s\n') %
                             (f, scmutil.origpath(self.ui, repo, f)))

                absorig = scmutil.origpath(self.ui, repo, absf)
                if copy:
                    util.copyfile(absf, absorig)
                else:
                    util.rename(absf, absorig)
721 721
722 722 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
723 723 fp=None, changes=None, opts={}):
724 724 stat = opts.get('stat')
725 725 m = scmutil.match(repo[node1], files, opts)
726 726 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
727 727 changes, stat, fp)
728 728
    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        """Bring one patch over from mergeq, merging when a plain apply
        fails.

        rev is the revision in mergeq holding the already-applied form
        of the patch. Returns (err, node) where node becomes the new
        head; on the merge path the patch file is regenerated from the
        merge result.
        """
        # first try just applying the patch
        (err, n) = self.apply(repo, [patch], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise error.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        strip(self.ui, repo, [n], update=False, backup=False)

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise error.Abort(_("update returned %d") % ret)
        n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
        if n is None:
            raise error.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except Exception:
            raise error.Abort(_("unable to read %s") % patch)

        # rewrite the patch file to reflect the merged result
        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
767 767
    def qparents(self, repo, rev=None):
        """return the mq handled parent or p1

        In some cases where mq ends up being the parent of a merge the
        appropriate parent may be p2
        (e.g. an in-progress merge started with mq disabled).

        If no parent is managed by mq, p1 is returned.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            # merge in progress: fall back to the last applied patch node
            if not self.applied:
                return None
            return self.applied[-1].node
        p1, p2 = repo.changelog.parents(rev)
        if p2 != nullid and p2 in [x.node for x in self.applied]:
            return p2
        return p1
788 788
    def mergepatch(self, repo, mergeq, series, diffopts):
        """Merge each patch in series from mergeq onto this queue.

        Returns (err, head) in the style of mergeone(); dirty state is
        saved when the whole series succeeds.
        """
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = newcommit(repo, None, '[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = True

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = True
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)
827 827
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file
        Returns (success, touched-files, fuzz).'''
        files = set()
        try:
            fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
                                  files=files, eolmode=None)
            return (True, list(files), fuzz)
        except Exception as inst:
            # best-effort failure report; the partially-touched file
            # list is still returned so callers can clean up
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            self.ui.traceback()
            return (False, list(files), False)
842 842
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None,
              tobackup=None, keepchanges=False):
        """Apply 'series' under wlock/lock and a transaction; wraps
        _apply().

        On AbortNoCleanup the transaction is still closed and state
        saved (the working directory is deliberately left as-is); on
        any other exception the transaction is aborted and the cached
        queue state invalidated.
        """
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files,
                                  tobackup=tobackup, keepchanges=keepchanges)
                tr.close()
                self.savedirty()
                return ret
            except AbortNoCleanup:
                tr.close()
                self.savedirty()
                raise
            except: # re-raises
                try:
                    tr.abort()
                finally:
                    self.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)
871 871
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None,
               tobackup=None, keepchanges=False):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.

        Each pushable patch in series is applied to the working
        directory and committed before moving to the next; 'hash' is
        the node of the last commit made (None if none was).
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    # save local copies of files the patch will touch
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _("conflicting local changes found"),
                            hint=_("did you forget to qrefresh?"))
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                repo.dirstate.beginparentchange()
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.setparents(p1, merge)
                repo.dirstate.endparentchange()

            if all_files and '.hgsubstate' in all_files:
                # the patch touched subrepo state: merge subrepos too
                wctx = repo[None]
                pctx = repo['.']
                overwrite = False
                mergedsubstate = subrepo.submerge(repo, pctx, wctx, wctx,
                    overwrite)
                files += mergedsubstate.keys()

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo['tip']
            n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                          force=True)
            if repo['tip'] == oldtip:
                raise error.Abort(_("qpush exactly duplicates child changeset"))
            if n is None:
                raise error.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working "
                               "directory\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
978 978
    def _cleanup(self, patches, numrevs, keep=False):
        """Remove patches from the series/status bookkeeping.

        Unless 'keep', the patch files themselves are deleted (and
        forgotten in the patch repo). numrevs applied entries are
        dropped from the status file. Returns the nodes of the dropped
        (finished) applied patches.
        """
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                try:
                    os.unlink(self.join(p))
                except OSError as inst:
                    if inst.errno != errno.ENOENT:
                        raise

        qfinished = []
        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True

        unknown = []

        # delete series entries highest-index-first so earlier
        # deletions do not shift later indexes
        for (i, p) in sorted([(self.findseries(p), p) for p in patches],
                             reverse=True):
            if i is not None:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                rev = dict((entry.name, entry.node) for entry in qfinished)
                for p in unknown:
                    msg = _('revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _('unknown patches: %s\n')
                raise error.Abort(''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]
1019 1019
    def _revpatches(self, repo, revs):
        """Map applied revisions to their patch names.

        revs must align with the bottom of the applied stack: revs[i]
        must be the revision of the i-th applied patch, otherwise this
        aborts. Warns when a patch would be finalized with only its
        placeholder commit message.
        """
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise error.Abort(_('revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                msg = _('cannot delete revision %d above applied patches')
                raise error.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches
1043 1043
    def finish(self, repo, revs):
        """Move the applied revisions in revs out of mq control (qfinish).

        With mq.secret set, the finished changesets' phase boundary is
        advanced to the configured new-commit phase.
        """
        # Manually trigger phase computation to ensure phasedefaults is
        # executed before we remove the patches.
        repo._phasecache
        patches = self._revpatches(repo, sorted(revs))
        qfinished = self._cleanup(patches, len(patches))
        if qfinished and repo.ui.configbool('mq', 'secret', False):
            # only use this logic when the secret option is added
            oldqbase = repo[qfinished[0]]
            tphase = repo.ui.config('phases', 'new-commit', phases.draft)
            if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
                with repo.transaction('qfinish') as tr:
                    phases.advanceboundary(repo, tr, tphase, qfinished)
1057 1057
    def delete(self, repo, patches, opts):
        """Remove patches by name and/or by applied revision (qdelete).

        Named patches must be unapplied; revisions are resolved to
        applied patches via _revpatches(). opts['keep'] preserves the
        patch files on disk.
        """
        if not patches and not opts.get('rev'):
            raise error.Abort(_('qdelete requires at least one revision or '
                                'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise error.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise error.Abort(_("patch %s not in series file") % patch)
            if patch not in realpatches:
                realpatches.append(patch)

        numrevs = 0
        if opts.get('rev'):
            if not self.applied:
                raise error.Abort(_('no patches applied'))
            revs = scmutil.revrange(repo, opts.get('rev'))
            revs.sort()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get('keep'))
1085 1085
1086 1086 def checktoppatch(self, repo):
1087 1087 '''check that working directory is at qtip'''
1088 1088 if self.applied:
1089 1089 top = self.applied[-1].node
1090 1090 patch = self.applied[-1].name
1091 1091 if repo.dirstate.p1() != top:
1092 1092 raise error.Abort(_("working directory revision is not qtip"))
1093 1093 return top, patch
1094 1094 return None, None
1095 1095
1096 1096 def putsubstate2changes(self, substatestate, changes):
1097 1097 for files in changes[:3]:
1098 1098 if '.hgsubstate' in files:
1099 1099 return # already listed up
1100 1100 # not yet listed up
1101 1101 if substatestate in 'a?':
1102 1102 changes[1].append('.hgsubstate')
1103 1103 elif substatestate in 'r':
1104 1104 changes[2].append('.hgsubstate')
1105 1105 else: # modified
1106 1106 changes[0].append('.hgsubstate')
1107 1107
    def checklocalchanges(self, repo, force=False, refresh=True):
        """Abort when the working directory has local changes (unless
        force); 'refresh' only selects the hint in the error message.

        Delegates to the module-level checklocalchanges() function that
        this method shadows.
        """
        excsuffix = ''
        if refresh:
            excsuffix = ', qrefresh first'
            # plain versions for i18n tool to detect them
            _("local changes found, qrefresh first")
            _("local changed subrepos found, qrefresh first")
        return checklocalchanges(repo, force, excsuffix)
1116 1116
    # patch names that would collide with mq's own control files or
    # filesystem special entries
    _reserved = ('series', 'status', 'guards', '.', '..')
    def checkreservedname(self, name):
        """Abort if 'name' cannot be used as a patch name (reserved
        word, forbidden prefix, or forbidden character)."""
        if name in self._reserved:
            raise error.Abort(_('"%s" cannot be used as the name of a patch')
                              % name)
        for prefix in ('.hg', '.mq'):
            if name.startswith(prefix):
                raise error.Abort(_('patch name cannot begin with "%s"')
                                  % prefix)
        for c in ('#', ':', '\r', '\n'):
            if c in name:
                raise error.Abort(_('%r cannot be used in the name of a patch')
                                  % c)
1130 1130
    def checkpatchname(self, name, force=False):
        """Abort if 'name' is reserved, or (without force) if a file or
        directory of that name already exists in the patch directory."""
        self.checkreservedname(name)
        if not force and os.path.exists(self.join(name)):
            if os.path.isdir(self.join(name)):
                raise error.Abort(_('"%s" already exists as a directory')
                                  % name)
            else:
                raise error.Abort(_('patch "%s" already exists') % name)
1139 1139
1140 1140 def makepatchname(self, title, fallbackname):
1141 1141 """Return a suitable filename for title, adding a suffix to make
1142 1142 it unique in the existing list"""
1143 1143 namebase = re.sub('[\s\W_]+', '_', title.lower()).strip('_')
1144 1144 namebase = namebase[:75] # avoid too long name (issue5117)
1145 1145 if namebase:
1146 1146 try:
1147 1147 self.checkreservedname(namebase)
1148 1148 except error.Abort:
1149 1149 namebase = fallbackname
1150 1150 else:
1151 1151 namebase = fallbackname
1152 1152 name = namebase
1153 1153 i = 0
1154 1154 while True:
1155 1155 if name not in self.fullseries:
1156 1156 try:
1157 1157 self.checkpatchname(name)
1158 1158 break
1159 1159 except error.Abort:
1160 1160 pass
1161 1161 i += 1
1162 1162 name = '%s__%s' % (namebase, i)
1163 1163 return name
1164 1164
1165 1165 def checkkeepchanges(self, keepchanges, force):
1166 1166 if force and keepchanges:
1167 1167 raise error.Abort(_('cannot use both --force and --keep-changes'))
1168 1168
    def new(self, repo, patchfn, *pats, **opts):
        """Create a new patch (qnew): commit the selected local changes
        and write the corresponding patch file.

        options:
        msg: a string or a no-argument function returning a string

        On any failure after the patch file was opened, the commit is
        rolled back and the patch file unlinked.
        """
        msg = opts.get('msg')
        edit = opts.get('edit')
        editform = opts.get('editform', 'mq.qnew')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        diffopts = self.diffopts({'git': opts.get('git')})
        if opts.get('checkname', True):
            self.checkpatchname(patchfn)
        inclsubs = checksubstate(repo)
        if inclsubs:
            substatestate = repo.dirstate['.hgsubstate']
        if opts.get('include') or opts.get('exclude') or pats:
            # detect missing files in pats
            def badfn(f, msg):
                if f != '.hgsubstate': # .hgsubstate is auto-created
                    raise error.Abort('%s: %s' % (f, msg))
            match = scmutil.match(repo[None], pats, opts, badfn=badfn)
            changes = repo.status(match=match)
        else:
            changes = self.checklocalchanges(repo, force=True)
        commitfiles = list(inclsubs)
        for files in changes[:3]:
            commitfiles.extend(files)
        match = scmutil.matchfiles(repo, commitfiles)
        if len(repo[None].parents()) > 1:
            raise error.Abort(_('cannot manage merge changesets'))
        self.checktoppatch(repo)
        insert = self.fullseriesend()
        with repo.wlock():
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, "w")
            except IOError as e:
                raise error.Abort(_('cannot write patch "%s": %s')
                                  % (patchfn, e.strerror))
            try:
                defaultmsg = "[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:
                    def finishdesc(desc):
                        if desc.rstrip():
                            return desc
                        else:
                            return defaultmsg
                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _('Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
                                                     extramsg=extramsg,
                                                     editform=editform)
                    commitmsg = msg
                else:
                    commitmsg = msg or defaultmsg

                n = newcommit(repo, None, commitmsg, user, date, match=match,
                              force=True, editor=editor)
                if n is None:
                    raise error.Abort(_("repo commit failed"))
                try:
                    # record the new patch in series/status, then write
                    # the patch file (header + diff against the parent)
                    self.fullseries[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parseseries()
                    self.seriesdirty = True
                    self.applieddirty = True
                    nctx = repo[n]
                    ph = patchheader(self.join(patchfn), self.plainmode)
                    if user:
                        ph.setuser(user)
                    if date:
                        ph.setdate('%s %s' % date)
                    ph.setparent(hex(nctx.p1().node()))
                    msg = nctx.description().strip()
                    if msg == defaultmsg.strip():
                        msg = ''
                    ph.setmessage(msg)
                    p.write(str(ph))
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        if inclsubs:
                            self.putsubstate2changes(substatestate, changes)
                        chunks = patchmod.diff(repo, node1=parent, node2=n,
                                               changes=changes, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except: # re-raises
                    repo.rollback()
                    raise
            except Exception:
                # undo the patch file creation before propagating
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except OSError:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
        self.removeundo(repo)
1273 1273
1274 1274 def isapplied(self, patch):
1275 1275 """returns (index, rev, patch)"""
1276 1276 for i, a in enumerate(self.applied):
1277 1277 if a.name == patch:
1278 1278 return (i, a.node, a.name)
1279 1279 return None
1280 1280
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number (as string) to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve a user-supplied patch identifier to a series name,
        using the fallbacks listed above; aborts when nothing matches.
        """
        def partialname(s):
            # resolve s as exact name, unique substring, or the
            # special aliases qtip/qbase
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == 'qtip':
                    return self.series[self.seriesend(True) - 1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

            if not strict:
                res = partialname(patch)
                if res:
                    return res
                # 'name-N': N patches before name in the series
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partialname(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                # 'name+N': N patches after name in the series
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partialname(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise error.Abort(_("patch %s not in series") % patch)
1347 1347
    def push(self, repo, patch=None, force=False, list=False, mergeq=None,
             all=False, move=False, exact=False, nobackup=False,
             keepchanges=False):
        """Apply the next patch(es) up to 'patch' (or all) - qpush.

        Returns 0 on success or no-op, 1 on non-raising failure.
        """
        self.checkkeepchanges(keepchanges, force)
        diffopts = self.diffopts()
        with repo.wlock():
            heads = []
            for hs in repo.branchmap().itervalues():
                heads.extend(hs)
            if not heads:
                heads = [nullid]
            if repo.dirstate.p1() not in heads and not exact:
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise error.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                else:
                    if reason:
                        reason = _('guarded by %s') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.seriesend()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force and not keepchanges:
                self.checklocalchanges(repo, refresh=self.applied)

            if exact:
                # --exact: update to the patch's recorded parent first
                if keepchanges:
                    raise error.Abort(
                        _("cannot use --exact and --keep-changes together"))
                if move:
                    raise error.Abort(_('cannot use --exact and --move '
                                        'together'))
                if self.applied:
                    raise error.Abort(_('cannot push --exact with applied '
                                        'patches'))
                root = self.series[start]
                target = patchheader(self.join(root), self.plainmode).parent
                if not target:
                    raise error.Abort(
                        _("%s does not have a parent recorded") % root)
                if not repo[target] == repo['.']:
                    hg.update(repo, target)

            if move:
                # --move: reorder the series so 'patch' comes next
                if not patch:
                    raise error.Abort(_("please specify the patch to move"))
                for fullstart, rpn in enumerate(self.fullseries):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                        break
                for i, rpn in enumerate(self.fullseries[fullstart:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = fullstart + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(fullstart, fullpatch)
                self.parseseries()
                self.seriesdirty = True

            self.applieddirty = True
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            tobackup = set()
            if (not nobackup and force) or keepchanges:
                status = self.checklocalchanges(repo, force=True)
                if keepchanges:
                    tobackup.update(status.modified + status.added +
                                    status.removed + status.deleted)
                else:
                    tobackup.update(status.modified + status.added)

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files,
                                     tobackup=tobackup, keepchanges=keepchanges)
            except AbortNoCleanup:
                raise
            except: # re-raises
                self.ui.warn(_('cleaning up working directory...\n'))
                cmdutil.revert(self.ui, repo, repo['.'],
                               repo.dirstate.parents(), no_backup=True)
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and qrefresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]
1494 1494
    def pop(self, repo, patch=None, force=False, update=True, all=False,
            nobackup=False, keepchanges=False):
        """Unapply one or more patches from the top of the queue.

        With ``patch``, pop down to (and including) that patch; with
        ``all``, pop everything; otherwise pop just the topmost patch.
        ``update`` controls whether the working directory is synced back
        to the new queue parent; ``nobackup``/``keepchanges`` control how
        outstanding local changes are treated.
        """
        self.checkkeepchanges(keepchanges, force)
        with repo.wlock():
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    # not an exact applied name: resolve via lookup
                    # (indexes, partial names) and retry
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise error.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            # index of the first applied patch that will be popped
            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # even with --no-update, force a working-copy update when
                # a queue revision is a working directory parent
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # skip the working-copy update when none of the popped
                # patches is a parent of the working directory
                parents = [p.node() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.node in parents:
                        needupdate = True
                        break
                update = needupdate

            tobackup = set()
            if update:
                s = self.checklocalchanges(repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(s.modified + s.added)
                elif keepchanges:
                    tobackup.update(s.modified + s.added +
                                    s.removed + s.deleted)

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise error.Abort(_('trying to pop unknown node %s') % node)

            # refuse to strip revisions not fully owned by this queue
            if heads != [self.applied[-1].node]:
                raise error.Abort(_("popping would remove a revision not "
                                    "managed by this patch queue"))
            if not repo[self.applied[-1].node].mutable():
                raise error.Abort(
                    _("popping would remove a public revision"),
                    hint=_("see 'hg help phases' for details"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, '.')[:4]
                if d:
                    raise error.Abort(_("deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    raise error.Abort(_("local changes found, qrefresh first"))
                self.backup(repo, tobackup)
                repo.dirstate.beginparentchange()
                for f in a:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                    repo.dirstate.drop(f)
                for f in m + r:
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.normal(f)
                repo.setparents(qp, nullid)
                repo.dirstate.endparentchange()
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            strip(self.ui, repo, [rev], update=False, backup=False)
            # bring subrepos to the state recorded at the new parent
            for s, state in repo['.'].substate.items():
                repo['.'].sub(s).get(state)
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
1602 1602
1603 1603 def diff(self, repo, pats, opts):
1604 1604 top, patch = self.checktoppatch(repo)
1605 1605 if not top:
1606 1606 self.ui.write(_("no patches applied\n"))
1607 1607 return
1608 1608 qp = self.qparents(repo, top)
1609 1609 if opts.get('reverse'):
1610 1610 node1, node2 = None, qp
1611 1611 else:
1612 1612 node1, node2 = qp, None
1613 1613 diffopts = self.diffopts(opts, patch)
1614 1614 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1615 1615
    def refresh(self, repo, pats=None, **opts):
        """Fold current working-directory changes into the top patch.

        Strips the qtip commit, recommits it with the working-directory
        changes selected by ``pats`` and the include/exclude opts folded
        in, and rewrites the patch file.  Supported opts include 'msg',
        'edit', 'editform', 'user', 'date', 'git' and 'short'.
        Returns 1 when no patches are applied.
        """
        if not self.applied:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        edit = opts.get('edit')
        editform = opts.get('editform', 'mq.qrefresh')
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            # refreshing rewrites qtip, so it must be a childless,
            # mutable head
            if repo.changelog.heads(top) != [top]:
                raise error.Abort(_("cannot qrefresh a revision with children"))
            if not repo[top].mutable():
                raise error.Abort(_("cannot qrefresh public revision"),
                                  hint=_("see 'hg help phases' for details"))

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)

            inclsubs = checksubstate(repo, hex(patchparent))
            if inclsubs:
                substatestate = repo.dirstate['.hgsubstate']

            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa = repo.status(top, patchparent)[:3]
            # but we do it backwards to take advantage of manifest/changelog
            # caching against the next repo.status call
            mm, aa, dd = repo.status(patchparent, top)[:3]
            changes = repo.changelog.read(top)
            man = repo.manifestlog[changes[0]].read()
            aaa = aa[:]
            matchfn = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                # filter with include/exclude options
                matchfn = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)

            # create 'match' that includes the files to be recommitted.
            # apply matchfn via repo.status to ensure correct case handling.
            cm, ca, cr, cd = repo.status(patchparent, match=matchfn)[:4]
            allmatches = set(cm + ca + cr + cd)
            refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]

            files = set(inclsubs)
            for x in refreshchanges:
                files.update(x)
            match = scmutil.matchfiles(repo, files)

            bmlist = repo[top].bookmarks()

            dsguard = None
            try:
                # roll the dirstate back if anything below fails
                dsguard = dirstateguard.dirstateguard(repo, 'mq.refresh')
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m) - 1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.drop(f)

                user = ph.user or changes[1]

                oldphase = repo[top].phase()

                # assumes strip can roll itself back if interrupted
                repo.setparents(*cparents)
                self.applied.pop()
                self.applieddirty = True
                strip(self.ui, repo, [top], update=False, backup=False)
                dsguard.close()
            finally:
                release(dsguard)

            try:
                # might be nice to attempt to roll back strip after this

                defaultmsg = "[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:
                    def finishdesc(desc):
                        # fall back to the default message when the
                        # editor result is empty
                        if desc.rstrip():
                            ph.setmessage(desc)
                            return desc
                        return defaultmsg
                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _('Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
                                                     extramsg=extramsg,
                                                     editform=editform)
                    message = msg or "\n".join(ph.message)
                elif not msg:
                    if not ph.message:
                        message = defaultmsg
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg
                    ph.setmessage(msg)

                # Ensure we create a new changeset in the same phase than
                # the old one.
                lock = tr = None
                try:
                    lock = repo.lock()
                    tr = repo.transaction('mq')
                    n = newcommit(repo, oldphase, message, user, ph.date,
                                  match=match, force=True, editor=editor)
                    # only write patch after a successful commit
                    c = [list(x) for x in refreshchanges]
                    if inclsubs:
                        self.putsubstate2changes(substatestate, c)
                    chunks = patchmod.diff(repo, patchparent,
                                           changes=c, opts=diffopts)
                    comments = str(ph)
                    if comments:
                        patchf.write(comments)
                    for chunk in chunks:
                        patchf.write(chunk)
                    patchf.close()

                    # move bookmarks from the old qtip to the new commit
                    marks = repo._bookmarks
                    for bm in bmlist:
                        marks[bm] = n
                    marks.recordchange(tr)
                    tr.close()

                    self.applied.append(statusentry(n, patchfn))
                finally:
                    lockmod.release(tr, lock)
            except: # re-raises
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(_('qrefresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)
1856 1856
1857 1857 def init(self, repo, create=False):
1858 1858 if not create and os.path.isdir(self.path):
1859 1859 raise error.Abort(_("patch queue directory already exists"))
1860 1860 try:
1861 1861 os.mkdir(self.path)
1862 1862 except OSError as inst:
1863 1863 if inst.errno != errno.EEXIST or not create:
1864 1864 raise
1865 1865 if create:
1866 1866 return self.qrepo(create=True)
1867 1867
1868 1868 def unapplied(self, repo, patch=None):
1869 1869 if patch and patch not in self.series:
1870 1870 raise error.Abort(_("patch %s is not in series file") % patch)
1871 1871 if not patch:
1872 1872 start = self.seriesend()
1873 1873 else:
1874 1874 start = self.series.index(patch) + 1
1875 1875 unapplied = []
1876 1876 for i in xrange(start, len(self.series)):
1877 1877 pushable, reason = self.pushable(i)
1878 1878 if pushable:
1879 1879 unapplied.append((i, self.series[i]))
1880 1880 self.explainpushable(i)
1881 1881 return unapplied
1882 1882
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print (part of) the patch series to the ui.

        ``start``/``length`` bound the slice of the series shown;
        ``status`` ('A'/'U'/'G') filters by state; ``summary`` appends
        the first line of each patch's message.  With ``missing``, list
        patch-directory files absent from the series instead.
        """
        def displayname(pfx, patchname, state):
            # emit a single series line, colored via qseries.* labels
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                if ph.message:
                    msg = ph.message[0]
                else:
                    msg = ''

                if self.ui.formatted():
                    # trim the summary to the remaining terminal width
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        msg = ''
                self.ui.write(patchname, label='qseries.' + state)
                self.ui.write(': ')
                self.ui.write(msg, label='qseries.message.' + state)
            else:
                self.ui.write(patchname, label='qseries.' + state)
            self.ui.write('\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(str(start + length - 1))
            for i in xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = 'A', 'applied'
                elif self.pushable(i)[0]:
                    char, state = 'U', 'unapplied'
                else:
                    char, state = 'G', 'guarded'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    # NOTE: the status filter is only honored when not verbose
                    continue
                displayname(pfx, patch, state)
        else:
            # list files in the patch directory that the series does not
            # mention, skipping mq's own bookkeeping files and dotfiles
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.statuspath, self.seriespath,
                                   self.guardspath)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x, 'missing')
1942 1942
1943 1943 def issaveline(self, l):
1944 1944 if l.name == '.hg.patches.save.line':
1945 1945 return True
1946 1946
1947 1947 def qrepo(self, create=False):
1948 1948 ui = self.baseui.copy()
1949 1949 if create or os.path.isdir(self.join(".hg")):
1950 1950 return hg.repository(ui, path=self.path, create=create)
1951 1951
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Rebuild queue state from a qsave changeset ``rev``.

        Parses the save commit's description (as written by ``save``) to
        recover the full series, the applied list and, when present, the
        saved queue-repository parents.  With ``delete`` the save entry
        itself is stripped; with ``qupdate`` the queue repository is
        updated to its saved parent.  Returns 1 on failure.
        """
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                # "Dirstate: <hex> <hex>" records the queue repo parents
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                # after 'Patch Data:': "<node>:<name>" for applied
                # patches, lines with an empty node part for the series
                l = line.rstrip()
                n, name = l.split(':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_("no saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()
        if delete:
            # only strip the save changeset if nothing was committed on top
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                strip(self.ui, repo, [rev], update=update, backup=False)
        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])
2005 2005
2006 2006 def save(self, repo, msg=None):
2007 2007 if not self.applied:
2008 2008 self.ui.warn(_("save: no patches applied, exiting\n"))
2009 2009 return 1
2010 2010 if self.issaveline(self.applied[-1]):
2011 2011 self.ui.warn(_("status is already saved\n"))
2012 2012 return 1
2013 2013
2014 2014 if not msg:
2015 2015 msg = _("hg patches saved state")
2016 2016 else:
2017 2017 msg = "hg patches: " + msg.rstrip('\r\n')
2018 2018 r = self.qrepo()
2019 2019 if r:
2020 2020 pp = r.dirstate.parents()
2021 2021 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
2022 2022 msg += "\n\nPatch Data:\n"
2023 2023 msg += ''.join('%s\n' % x for x in self.applied)
2024 2024 msg += ''.join(':%s\n' % x for x in self.fullseries)
2025 2025 n = repo.commit(msg, force=True)
2026 2026 if not n:
2027 2027 self.ui.warn(_("repo commit failed\n"))
2028 2028 return 1
2029 2029 self.applied.append(statusentry(n, '.hg.patches.save.line'))
2030 2030 self.applieddirty = True
2031 2031 self.removeundo(repo)
2032 2032
2033 2033 def fullseriesend(self):
2034 2034 if self.applied:
2035 2035 p = self.applied[-1].name
2036 2036 end = self.findseries(p)
2037 2037 if end is None:
2038 2038 return len(self.fullseries)
2039 2039 return end + 1
2040 2040 return 0
2041 2041
2042 2042 def seriesend(self, all_patches=False):
2043 2043 """If all_patches is False, return the index of the next pushable patch
2044 2044 in the series, or the series length. If all_patches is True, return the
2045 2045 index of the first patch past the last applied one.
2046 2046 """
2047 2047 end = 0
2048 2048 def nextpatch(start):
2049 2049 if all_patches or start >= len(self.series):
2050 2050 return start
2051 2051 for i in xrange(start, len(self.series)):
2052 2052 p, reason = self.pushable(i)
2053 2053 if p:
2054 2054 return i
2055 2055 self.explainpushable(i)
2056 2056 return len(self.series)
2057 2057 if self.applied:
2058 2058 p = self.applied[-1].name
2059 2059 try:
2060 2060 end = self.series.index(p)
2061 2061 except ValueError:
2062 2062 return 0
2063 2063 return nextpatch(end + 1)
2064 2064 return nextpatch(end)
2065 2065
2066 2066 def appliedname(self, index):
2067 2067 pname = self.applied[index].name
2068 2068 if not self.ui.verbose:
2069 2069 p = pname
2070 2070 else:
2071 2071 p = str(self.series.index(pname)) + " " + pname
2072 2072 return p
2073 2073
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into the queue, from files or from revisions.

        With ``rev``, existing mutable changesets are placed under mq
        control (they must form a linear path to qbase, or to a head when
        nothing is applied).  Otherwise each entry of ``files`` (or stdin
        for '-') is copied — or, with ``existing``, registered in place —
        into the patch directory and inserted into the series.  Returns
        the list of imported patch names.
        """
        def checkseries(patchname):
            if patchname in self.series:
                raise error.Abort(_('patch %s is already in the series file')
                                  % patchname)

        if rev:
            if files:
                raise error.Abort(_('option "-r" not valid when importing '
                                    'files'))
            rev = scmutil.revrange(repo, rev)
            # import from the top down so each revision extends the queue
            rev.sort(reverse=True)
        elif not files:
            raise error.Abort(_('no files or revisions specified'))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise error.Abort(_('option "-n" not valid when importing multiple '
                                'patches'))
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev.first()))
            if len(heads) > 1:
                raise error.Abort(_('revision %d is the root of more than one '
                                    'branch') % rev.last())
            if self.applied:
                base = repo.changelog.node(rev.first())
                if base in [n.node for n in self.applied]:
                    raise error.Abort(_('revision %d is already managed')
                                      % rev.first())
                if heads != [self.applied[-1].node]:
                    raise error.Abort(_('revision %d is not the parent of '
                                        'the queue') % rev.first())
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev.first())]:
                    raise error.Abort(_('revision %d has unmanaged children')
                                      % rev.first())
                lastparent = None

            diffopts = self.diffopts({'git': git})
            with repo.transaction('qimport') as tr:
                for r in rev:
                    if not repo[r].mutable():
                        raise error.Abort(_('revision %d is not mutable') % r,
                                          hint=_("see 'hg help phases' "
                                                 'for details'))
                    p1, p2 = repo.changelog.parentrevs(r)
                    n = repo.changelog.node(r)
                    # merges cannot be represented as a single patch
                    if p2 != nullrev:
                        raise error.Abort(_('cannot import merge revision %d')
                                          % r)
                    if lastparent and lastparent != r:
                        raise error.Abort(_('revision %d is not the parent of '
                                            '%d')
                                          % (r, lastparent))
                    lastparent = p1

                    if not patchname:
                        # derive a patch name from the first summary line
                        patchname = self.makepatchname(
                            repo[r].description().split('\n', 1)[0],
                            '%d.diff' % r)
                    checkseries(patchname)
                    self.checkpatchname(patchname, force)
                    self.fullseries.insert(0, patchname)

                    patchf = self.opener(patchname, "w")
                    cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
                    patchf.close()

                    se = statusentry(n, patchname)
                    self.applied.insert(0, se)

                    self.added.append(patchname)
                    imported.append(patchname)
                    patchname = None
                    if rev and repo.ui.configbool('mq', 'secret', False):
                        # if we added anything with --rev, move the secret root
                        phases.retractboundary(repo, tr, phases.secret, [n])
                self.parseseries()
                self.applieddirty = True
                self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                # register an already-present patch file in the series
                if filename == '-':
                    raise error.Abort(_('-e is incompatible with import from -')
                                     )
                filename = normname(filename)
                self.checkreservedname(filename)
                if util.url(filename).islocal():
                    originpath = self.join(filename)
                    if not os.path.isfile(originpath):
                        raise error.Abort(
                            _("patch %s does not exist") % filename)

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(_('renaming %s to %s\n')
                                        % (filename, patchname))
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                # copy the patch contents into the patch directory
                if filename == '-' and not patchname:
                    raise error.Abort(_('need --name to import a patch from -'))
                elif not patchname:
                    patchname = normname(os.path.basename(filename.rstrip('/')))
                self.checkpatchname(patchname, force)
                try:
                    if filename == '-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise error.Abort(_("unable to read file %s") % filename)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
            self.parseseries()
            self.seriesdirty = True
            self.ui.warn(_("adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported
2214 2214
def fixkeepchangesopts(ui, opts):
    """Apply the mq.keepchanges config default to a command's options.

    When the config knob is set and neither --force nor --exact was
    given, return a copy of ``opts`` with keep_changes enabled;
    otherwise return ``opts`` unchanged.
    """
    keep = ui.configbool('mq', 'keepchanges')
    if not keep or opts.get('force') or opts.get('exact'):
        return opts
    # do not mutate the caller's dict
    newopts = dict(opts)
    newopts['keep_changes'] = True
    return newopts
2222 2222
@command("qdelete|qremove|qrm",
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [],
           _('stop managing a revision (DEPRECATED)'), _('REV'))],
         _('hg qdelete [-k] [PATCH]...'))
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    # persist the updated series/status files
    mq.savedirty()
    return 0
2241 2241
@command("qapplied",
         [('1', 'last', None, _('show only the preceding applied patch'))
          ] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]'))
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq

    if patch:
        if patch not in q.series:
            raise error.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    start = 0
    if opts.get('last'):
        # with --last, show only the single patch below the current top
        if not end:
            ui.write(_("no patches applied\n"))
            return 1
        if end == 1:
            ui.write(_("only one patch applied\n"))
            return 1
        start = end - 2
        end = 1

    q.qseries(repo, length=end, start=start, status='A',
              summary=opts.get('summary'))
2274 2274
2275 2275
@command("qunapplied",
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]'))
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    if patch:
        if patch not in q.series:
            raise error.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    # nothing left to show when --first is given and the series is done
    if start == len(q.series) and opts.get('first'):
        ui.write(_("all patches applied\n"))
        return 1

    length = 1 if opts.get('first') else None
    q.qseries(repo, start=start, length=length, status='U',
              summary=opts.get('summary'))
2302 2302
@command("qimport",
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '',
           _('name of patch file'), _('NAME')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [],
           _('place existing revisions under mq control'), _('REV')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev . -n patch will place the current revision
    under mq control). With -g/--git, patches imported with --rev will
    use the git diff format. See the diffs help topic for information
    on why this is important for preserving rename/copy information
    and permission changes. Use :hg:`qfinish` to remove changesets
    from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    with repo.lock(): # cause this may move phase
        q = repo.mq
        try:
            newpatches = q.qimport(
                repo, filename, patchname=opts.get('name'),
                existing=opts.get('existing'), force=opts.get('force'),
                rev=opts.get('rev'), git=opts.get('git'))
        finally:
            # persist series/status even when the import aborts midway
            q.savedirty()

    if newpatches and opts.get('push') and not opts.get('rev'):
        return q.push(repo, newpatches[-1])
    return 0
2360 2360
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if r:
        # a versioned patch repository was created (-c): seed it with
        # an .hgignore and an empty series file, and schedule both for
        # commit by the caller
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wvfs('.hgignore', 'w')
            fp.write('^\\.hg\n')
            fp.write('^\\.mq\n')
            fp.write('syntax: glob\n')
            fp.write('status\n')
            fp.write('guards\n')
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wvfs('series', 'w').close()
        r[None].add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
2386 2386
@command("^qinit",
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]'))
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    # thin wrapper: all of the work happens in qinit()
    create = opts.get('create_repo')
    return qinit(ui, repo, create=create)
2402 2402
@command("qclone",
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None,
           _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '',
           _('location of source patch repository'), _('REPO')),
         ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]'),
         norepo=True)
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.peer(ui, opts, ui.expandpath(source))

    # patches repo (source only); probe it early so we abort before
    # touching the destination if it is missing
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.peer(ui, opts, patchespath)
    except error.RepoError:
        raise error.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        # NOTE(review): qbase is still None at this point, so
        # repo[qbase] is the working-directory context -- possibly
        # repo['qbase'] (the mq tag) was intended; confirm upstream.
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                # remote destination: clone only the heads that do not
                # descend from qbase, plus qbase's parent, so applied
                # patches are left out of the destination
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass

    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, opts, sr.url(), dest,
                      pull=opts.get('pull'),
                      rev=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))

    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))

    if dr.local():
        repo = dr.local()
        if qbase:
            # the full clone may still contain applied patches; strip
            # them so the destination starts with an empty stack
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            strip(ui, repo, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())
2489 2489
@command("qcommit|qci",
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...'),
         inferrepo=True)
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    # delegate to the regular commit command, but run it inside the
    # versioned patch repository
    r = repo.mq.qrepo()
    if not r:
        raise error.Abort('no queue repository')
    commands.commit(r.ui, r, *pats, **opts)
2503 2503
@command("qseries",
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]'))
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    missing = opts.get('missing')
    summary = opts.get('summary')
    repo.mq.qseries(repo, missing=missing, summary=summary)
    return 0
2515 2515
@command("qtop", seriesopts, _('hg qtop [-s]'))
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    # index just past the last applied patch; 0 when nothing is applied
    t = q.seriesend(True) if q.applied else 0
    if not t:
        ui.write(_("no patches applied\n"))
        return 1
    q.qseries(repo, start=t - 1, length=1, status='A',
              summary=opts.get('summary'))
2533 2533
@command("qnext", seriesopts, _('hg qnext [-s]'))
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    pos = q.seriesend()
    # seriesend() points at the first unapplied entry; at the end of
    # the series there is nothing left to push
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
2545 2545
@command("qprev", seriesopts, _('hg qprev [-s]'))
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    napplied = len(q.applied)
    # need at least two applied patches for a "preceding" one to exist
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    idx = q.series.index(q.applied[-2].name)
    q.qseries(repo, start=idx, length=1, status='A',
              summary=opts.get('summary'))
2562 2562
def setupheaderopts(ui, opts):
    # Resolve -U/--currentuser and -D/--currentdate into concrete
    # 'user'/'date' values, without overriding explicit -u/-d options.
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
2568 2568
@command("^qnew",
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '',
           _('add "From: <USER>" to patch'), _('USER')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '',
           _('add "Date: <DATE>" to patch'), _('DATE'))
         ] + commands.walkopts + commands.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
         inferrepo=True)
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    q = repo.mq
    # stash the resolved commit message in opts for q.new() to consume
    opts['msg'] = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.savedirty()
    return 0
2614 2614
@command("^qrefresh",
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user'), _('USER')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'), _('DATE'))
         ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
         inferrepo=True)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    # refresh mutates the working dir state; hold the wlock throughout
    with repo.wlock():
        ret = q.refresh(repo, pats, msg=message, **opts)
        q.savedirty()
        return ret
2659 2659
@command("^qdiff",
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...'),
         inferrepo=True)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # page the output, like the core diff command does
    ui.pager('qdiff')
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
2682 2682
@command('qfold',
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    q = repo.mq
    if not files:
        raise error.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('skipping already folded patch %s\n') % p)
            # actually skip the duplicate: previously the patch was
            # appended anyway and its diff applied a second time,
            # contradicting the warning above
            continue
        if q.isapplied(p):
            raise error.Abort(_('qfold cannot fold already applied patch %s')
                              % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect each folded patch's header for the combined message
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_('error folding patch %s') % p)

    if not message:
        # join the current patch header with the folded headers,
        # separated by '* * *' lines
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append('* * *')
                message.extend(msg)
        message = '\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    with repo.wlock():
        # refresh the top patch with the cumulative diff, then delete
        # (or keep, with -k) the folded patch files
        q.refresh(repo, msg=message, git=diffopts.git, edit=opts.get('edit'),
                  editform='mq.qfold')
        q.delete(repo, patches, opts)
        q.savedirty()
2749 2749
@command("qgoto",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('overwrite any local changes')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qgoto [OPTION]... PATCH'))
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get('no_backup')
    keepchanges = opts.get('keep_changes')
    force = opts.get('force')
    # pop down to the patch if it is already applied, push up otherwise
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=force, nobackup=nobackup,
                    keepchanges=keepchanges)
    else:
        ret = q.push(repo, patch, force=force, nobackup=nobackup,
                     keepchanges=keepchanges)
    q.savedirty()
    return ret
2773 2773
@command("qguard",
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        # print one series entry with its guards, labeled by state
        # (applied / unapplied / guarded) for color output
        guards = q.seriesguards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise error.Abort(_('cannot mix -l/--list with options or '
                               'arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # no explicit patch name: when the first argument looks like a
    # guard (+foo/-foo) or there are no arguments, default to the
    # topmost applied patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise error.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_('no patch to work with'))
    if args or opts.get('none'):
        # set (or with -n, clear) the guards on the chosen patch
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_('no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        # no guard arguments: just print the patch's current guards
        status(q.series.index(q.lookup(patch)))
2848 2848
@command("qheader", [], _('hg qheader [PATCH]'))
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    # default to the topmost applied patch when no name was given
    if not patch:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write('\n'.join(ph.message) + '\n')
2866 2866
def lastsavename(path):
    """find the most recent numbered backup of ``path``

    Scans the directory containing ``path`` for siblings named
    ``<base>.<number>`` and returns ``(fullpath, index)`` for the entry
    with the highest number, or ``(None, None)`` when none exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Escape the base name and anchor the numeric suffix: the old
    # pattern "%s.([0-9]+)" used a bare regex '.' and no end anchor,
    # so e.g. 'patchesX3' or 'patches.12foo' were miscounted as
    # backups of 'patches'.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2883 2883
def savename(path):
    """return the name for the next numbered backup of ``path``"""
    last, index = lastsavename(path)
    # no existing backup: start numbering at .1
    if last is None:
        index = 0
    return path + ".%d" % (index + 1)
2890 2890
@command("^qpush",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('apply on top of local changes')),
          ('e', 'exact', None,
           _('apply the target patch to its recorded parent')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
          ('n', 'name', '',
           _('merge queue name (DEPRECATED)'), _('NAME')),
          ('', 'move', None,
           _('reorder patch series and apply only the patch')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = fixkeepchangesopts(ui, opts)
    if opts.get('merge'):
        # deprecated merge mode: replay patches against a previously
        # saved queue (-n NAME, or the most recent qsave directory)
        if opts.get('name'):
            newpath = repo.join(opts.get('name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
                 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
                 keepchanges=opts.get('keep_changes'))
    return ret
2935 2935
@command("^qpop",
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '',
           _('queue name to pop (DEPRECATED)'), _('NAME')),
          ('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('forget any local changes to patched files')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpop [-a] [-f] [PATCH | INDEX]'))
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    if opts.get('name'):
        # deprecated: pop from a named (saved) queue's state files and
        # leave the working directory untouched
        q = queue(ui, repo.baseui, repo.path, repo.join(opts.get('name')))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'), nobackup=opts.get('no_backup'),
                keepchanges=opts.get('keep_changes'))
    q.savedirty()
    return ret
2972 2972
@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    # single-argument form: rename the topmost applied patch
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory keeps the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    i = q.findseries(patch)
    # preserve any '#guard' annotations on the series entry
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + ''.join([' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    # if the patch is applied, keep the status file in sync too
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r and patch in r.dirstate:
        # mirror the rename in the versioned patch repository: a
        # freshly added file becomes drop+add, a committed file is
        # recorded as a copy and the old name forgotten
        wctx = r[None]
        with r.wlock():
            if r.dirstate[patch] == 'a':
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                wctx.copy(patch, name)
                wctx.forget([patch])

    q.savedirty()
3027 3027
@command("qrestore",
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV'))
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    # resolve the revision argument to a node before restoring
    rev = repo.lookup(rev)
    q.restore(repo, rev, delete=opts.get('delete'),
              qupdate=opts.get('update'))
    q.savedirty()
    return 0
3042 3042
@command("qsave",
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '',
           _('copy directory name'), _('NAME')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty() # save to .hg/patches before copying
    if opts.get('copy'):
        # copy the whole patch directory aside, either to -n NAME or to
        # the next free numbered backup directory
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise error.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts.get('force'):
                    raise error.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        # clear the applied list only; patch files remain on disk
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0
3080 3080
3081 3081
3082 3082 @command("qselect",
3083 3083 [('n', 'none', None, _('disable all guards')),
3084 3084 ('s', 'series', None, _('list all guards in series file')),
3085 3085 ('', 'pop', None, _('pop to before first guarded applied patch')),
3086 3086 ('', 'reapply', None, _('pop, then reapply patches'))],
3087 3087 _('hg qselect [OPTION]... [GUARD]...'))
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable    (negative guard)
        qguard bar.patch    +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    guards = q.active()

    def pushable(idx):
        # whether the idx'th applied patch is pushable under the
        # currently active guard selection
        return q.pushable(q.applied[idx].name)[0]

    if args or opts.get('none'):
        # selection is changing: snapshot the state first so we can
        # report how many patches changed pushability
        oldunapplied = q.unapplied(repo)
        oldguarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
            if len(unapplied) != len(oldunapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(oldunapplied), len(unapplied)))
            if len(guarded) != len(oldguarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(oldguarded), len(guarded)))
    elif opts.get('series'):
        # tally how often each guard appears across the series file
        counts = {}
        unguarded = 0
        for gs in q.seriesguards:
            if not gs:
                unguarded += 1
            for g in gs:
                counts.setdefault(g, 0)
                counts[g] += 1
        if ui.verbose:
            counts['NONE'] = unguarded
        counts = counts.items()
        # sort by guard name, ignoring the +/- sign prefix
        counts.sort(key=lambda x: x[0][1:])
        if counts:
            ui.note(_('guards in series file:\n'))
            for guard, count in counts:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    reapply = opts.get('reapply') and q.applied and q.applied[-1].name
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        # pop down to just before the first guarded applied patch
        for i in xrange(len(q.applied)):
            if not pushable(i):
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, q.applied[i - 1].name)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()
3190 3190
@command("qfinish",
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...'))
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    if opts.get('applied'):
        revrange = ('qbase::qtip',) + revrange
    elif not revrange:
        raise error.Abort(_('no revisions specified'))

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    if repo['.'].rev() in revs and repo[None].files():
        ui.warn(_('warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases but leaves it to the caller to take
    # the repo lock (avoiding a deadlock with wlock), so take it here.
    with repo.lock():
        q.finish(repo, revs)
        q.savedirty()
    return 0
3232 3232
@command("qqueue",
         [('l', 'list', False, _('list all available queues')),
          ('', 'active', False, _('print name of active queue')),
          ('c', 'create', False, _('create new queue')),
          ('', 'rename', False, _('rename active queue')),
          ('', 'delete', False, _('delete reference to queue')),
          ('', 'purge', False, _('delete queue, and remove patch dir')),
         ],
         _('[OPTION] [QUEUE]'))
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''
    q = repo.mq
    _defaultqueue = 'patches'
    _allqueues = 'patches.queues'
    _activequeue = 'patches.queue'

    def _getcurrent():
        # the active queue name is encoded in the patch directory name
        cur = os.path.basename(q.path)
        if cur.startswith('patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        # True when the queue registry file does not exist yet
        try:
            fh = repo.vfs(_allqueues, 'r')
            fh.close()
        except IOError:
            return True
        return False

    def _getqueues():
        current = _getcurrent()
        try:
            fp = repo.vfs(_allqueues, 'r')
            queues = [queue.strip() for queue in fp if queue.strip()]
            fp.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]
        return sorted(queues)

    def _setactive(name):
        if q.applied:
            raise error.Abort(_('new queue created, but cannot make active '
                                'as patches are applied'))
        _setactivenocheck(name)

    def _setactivenocheck(name):
        # the default queue is represented by an empty marker file
        fp = repo.vfs(_activequeue, 'w')
        if name != 'patches':
            fp.write(name)
        fp.close()

    def _addqueue(name):
        fp = repo.vfs(_allqueues, 'a')
        fp.write('%s\n' % (name,))
        fp.close()

    def _queuedir(name):
        if name == 'patches':
            return repo.join('patches')
        return repo.join('patches-' + name)

    def _validname(name):
        return not any(n in ':\\/.' for n in name)

    def _delete(name):
        if name not in existing:
            raise error.Abort(_('cannot delete queue that does not exist'))

        current = _getcurrent()
        if name == current:
            raise error.Abort(_('cannot delete currently active queue'))

        # rewrite the registry without the deleted queue, then swap it in
        fp = repo.vfs('patches.queues.new', 'w')
        for queue in existing:
            if queue == name:
                continue
            fp.write('%s\n' % (queue,))
        fp.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))

    if not name or opts.get('list') or opts.get('active'):
        current = _getcurrent()
        if opts.get('active'):
            ui.write('%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write('%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(' (active)\n'))
            else:
                ui.write('\n')
        return

    if not _validname(name):
        raise error.Abort(
            _('invalid queue name, may not contain the characters ":\\/."'))

    with repo.wlock():
        existing = _getqueues()

        if opts.get('create'):
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)
            if _noqueues():
                _addqueue(_defaultqueue)
            _addqueue(name)
            _setactive(name)
        elif opts.get('rename'):
            current = _getcurrent()
            if name == current:
                raise error.Abort(_('can\'t rename "%s" to its current name')
                                  % name)
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)

            olddir = _queuedir(current)
            newdir = _queuedir(name)

            if os.path.exists(newdir):
                raise error.Abort(_('non-queue directory "%s" already exists') %
                                  newdir)

            # rewrite the registry with the new name while moving the
            # patch directory itself
            fp = repo.vfs('patches.queues.new', 'w')
            for queue in existing:
                if queue == current:
                    fp.write('%s\n' % (name,))
                    if os.path.exists(olddir):
                        util.rename(olddir, newdir)
                else:
                    fp.write('%s\n' % (queue,))
            fp.close()
            util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
            _setactivenocheck(name)
        elif opts.get('delete'):
            _delete(name)
        elif opts.get('purge'):
            if name in existing:
                _delete(name)
            qdir = _queuedir(name)
            if os.path.exists(qdir):
                shutil.rmtree(qdir)
        else:
            if name not in existing:
                raise error.Abort(_('use --create to create a new queue'))
            _setactive(name)
3408 3408
def mqphasedefaults(repo, roots):
    """callback used to set mq changeset as secret when no phase data exists"""
    if repo.mq.applied:
        # mq changesets default to secret when mq.secret is set,
        # otherwise to draft
        mqphase = (phases.secret
                   if repo.ui.configbool('mq', 'secret', False)
                   else phases.draft)
        qbase = repo[repo.mq.applied[0].node]
        roots[mqphase].add(qbase.node())
    return roots
3419 3419
def reposetup(ui, repo):
    """Install the mq-aware repository class on local repositories."""

    class mqrepo(repo.__class__):
        @localrepo.unfilteredpropertycache
        def mq(self):
            # the queue object is created lazily and cached on the
            # unfiltered repo
            return queue(self.ui, self.baseui, self.path)

        def invalidateall(self):
            super(mqrepo, self).invalidateall()
            if localrepo.hasunfilteredcache(self, 'mq'):
                # recreate mq in case queue path was changed
                delattr(self.unfiltered(), 'mq')

        def abortifwdirpatched(self, errmsg, force=False):
            # refuse the operation when the working directory parent is
            # an applied mq patch (unless forced)
            if self.mq.applied and self.mq.checkapplied and not force:
                parents = self.dirstate.parents()
                patches = [s.node for s in self.mq.applied]
                if parents[0] in patches or parents[1] in patches:
                    raise error.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            self.abortifwdirpatched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def checkpush(self, pushop):
            if self.mq.applied and self.mq.checkapplied and not pushop.force:
                outapplied = [e.node for e in self.mq.applied]
                if pushop.revs:
                    # Assume applied patches have no non-patch descendants and
                    # are not on remote already. Filtering any changeset not
                    # pushed.
                    heads = set(pushop.revs)
                    for node in reversed(outapplied):
                        if node in heads:
                            break
                        else:
                            outapplied.pop()
                # looking for pushed and shared changeset
                for node in outapplied:
                    if self[node].phase() < phases.secret:
                        raise error.Abort(_('source has mq patches applied'))
                # no non-secret patches pushed
            super(mqrepo, self).checkpush(pushop)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(patch.node, patch.name) for patch in q.applied]

            try:
                # for now ignore filtering business
                self.unfiltered().changelog.rev(mqtags[-1][0])
            except error.LookupError:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            # do not add fake tags for filtered revisions
            included = self.changelog.hasnode
            mqtags = [mqt for mqt in mqtags if included(mqt[0])]
            if not mqtags:
                return result

            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for patch in mqtags:
                if patch[1] in tags:
                    self.ui.warn(_('tag %s overrides mq patch of the same '
                                   'name\n') % patch[1])
                else:
                    tags[patch[1]] = patch[0]

            return result

    if repo.local():
        repo.__class__ = mqrepo

        repo._phasedefaults.append(mqphasedefaults)
3509 3509
def mqimport(orig, ui, repo, *args, **kwargs):
    # wrapper for 'hg import': refuse to import over an applied patch
    # unless committing is disabled or --force is given
    if (util.safehasattr(repo, 'abortifwdirpatched')
        and not kwargs.get('no_commit', False)):
        repo.abortifwdirpatched(_('cannot import over an applied patch'),
                                kwargs.get('force'))
    return orig(ui, repo, *args, **kwargs)
3516 3516
def mqinit(orig, ui, *args, **kwargs):
    # wrapper for 'hg init --mq': initialize the queue repository instead
    mq = kwargs.pop('mq', None)

    if not mq:
        return orig(ui, *args, **kwargs)

    if args:
        repopath = args[0]
        if not hg.islocal(repopath):
            raise error.Abort(_('only a local queue repository '
                                'may be initialized'))
    else:
        repopath = cmdutil.findrepo(pycompat.getcwd())
        if not repopath:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
    repo = hg.repository(ui, repopath)
    return qinit(ui, repo, True)
3535 3535
def mqcommand(orig, ui, repo, *args, **kwargs):
    """Add --mq option to operate on patch repository instead of main"""

    # some commands do not like getting unknown options
    mq = kwargs.pop('mq', None)

    if not mq:
        return orig(ui, repo, *args, **kwargs)

    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise error.Abort(_('no queue repository'))
    # rerun the command against the queue repository
    return orig(qrepo.ui, qrepo, *args, **kwargs)
3550 3550
def summaryhook(ui, repo):
    # contribute an "mq:" line to 'hg summary'
    q = repo.mq
    applied = len(q.applied)
    unapplied = len(q.unapplied(repo))
    pieces = []
    if applied:
        pieces.append(ui.label(_("%d applied"), 'qseries.applied') % applied)
    if unapplied:
        pieces.append(ui.label(_("%d unapplied"), 'qseries.unapplied')
                      % unapplied)
    if pieces:
        # i18n: column positioning for "hg summary"
        ui.write(_("mq: %s\n") % ', '.join(pieces))
    else:
        # i18n: column positioning for "hg summary"
        ui.note(_("mq: (empty queue)\n"))
3565 3565
revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('mq()')
def revsetmq(repo, subset, x):
    """Changesets managed by MQ.
    """
    revsetlang.getargs(x, 0, 0, _("mq takes no arguments"))
    # revisions currently under mq control
    applied = set(repo[entry.node].rev() for entry in repo.mq.applied)
    return smartset.baseset([r for r in subset if r in applied])

# tell hggettext to extract docstrings from these functions:
i18nfunctions = [revsetmq]
3578 3578
def extsetup(ui):
    # Ensure mq wrappers are called first, regardless of extension load order by
    # NOT wrapping in uisetup() and instead deferring to init stage two here.
    mqopt = [('', 'mq', None, _("operate on patch repository"))]

    extensions.wrapcommand(commands.table, 'import', mqimport)
    cmdutil.summaryhooks.add('mq', summaryhook)

    entry = extensions.wrapcommand(commands.table, 'init', mqinit)
    entry[1].extend(mqopt)

    def dotable(cmdtable):
        # add --mq to every command that operates on a repository
        for cmd, entry in cmdtable.iteritems():
            cmd = cmdutil.parsealiases(cmd)[0]
            func = entry[0]
            if func.norepo:
                continue
            entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
            entry[1].extend(mqopt)

    dotable(commands.table)

    # also wrap commands provided by other loaded extensions
    for extname, extmodule in extensions.extensions():
        if extmodule.__file__ != __file__:
            dotable(getattr(extmodule, 'cmdtable', {}))
3604 3604
# default color/effect labels used by qguard and qseries output
colortable = {
    'qguard.negative': 'red',
    'qguard.positive': 'yellow',
    'qguard.unguarded': 'green',
    'qseries.applied': 'blue bold underline',
    'qseries.guarded': 'black bold',
    'qseries.missing': 'red bold',
    'qseries.unapplied': 'black bold',
}
@@ -1,746 +1,746 b''
1 1 # Patch transplanting extension for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to transplant changesets from another branch
9 9
10 10 This extension allows you to transplant changes to another parent revision,
11 11 possibly in another repository. The transplant is done using 'diff' patches.
12 12
13 13 Transplanted patches are recorded in .hg/transplant/transplants, as a
14 14 map from a changeset hash to its hash in the source repository.
15 15 '''
16 16 from __future__ import absolute_import
17 17
18 18 import os
19 19 import tempfile
20 20 from mercurial.i18n import _
21 21 from mercurial import (
22 22 bundlerepo,
23 23 cmdutil,
24 24 error,
25 25 exchange,
26 26 hg,
27 27 match,
28 28 merge,
29 29 node as nodemod,
30 30 patch,
31 31 pycompat,
32 32 registrar,
33 33 revlog,
34 34 revset,
35 35 scmutil,
36 36 smartset,
37 37 util,
38 38 )
39 39
class TransplantError(error.Abort):
    """Raised when a transplant stops and needs manual intervention."""
    pass
42 42
# command table, populated by the @command decorator below
cmdtable = {}
command = cmdutil.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
50 50
class transplantentry(object):
    """A single transplant record pairing a local node with its source."""

    def __init__(self, lnode, rnode):
        self.lnode = lnode  # node id created in the local repository
        self.rnode = rnode  # node id in the source repository
55 55
class transplants(object):
    """On-disk map from source changeset node to transplanted local nodes.

    The map is stored in *transplantfile* under *path*, one
    "localhex:sourcehex" pair per line.
    """

    def __init__(self, path=None, transplantfile=None, opener=None):
        self.path = path
        self.transplantfile = transplantfile
        self.opener = opener

        if not opener:
            self.opener = scmutil.vfs(self.path)
        self.transplants = {}  # rnode -> [transplantentry, ...]
        self.dirty = False
        self.read()

    def read(self):
        """Load the transplant map from disk, if a backing file exists."""
        # Check transplantfile before joining: os.path.join chokes on a
        # None component, which used to crash construction with
        # transplantfile=None (the documented default).
        if not self.transplantfile:
            return
        abspath = os.path.join(self.path, self.transplantfile)
        if os.path.exists(abspath):
            for line in self.opener.read(self.transplantfile).splitlines():
                lnode, rnode = map(revlog.bin, line.split(':'))
                list = self.transplants.setdefault(rnode, [])
                list.append(transplantentry(lnode, rnode))

    def write(self):
        """Persist the map when it has been modified."""
        if self.dirty and self.transplantfile:
            if not os.path.isdir(self.path):
                os.mkdir(self.path)
            fp = self.opener(self.transplantfile, 'w')
            for list in self.transplants.itervalues():
                for t in list:
                    l, r = map(nodemod.hex, (t.lnode, t.rnode))
                    fp.write(l + ':' + r + '\n')
            fp.close()
            self.dirty = False

    def get(self, rnode):
        """Return the entries recorded for source node *rnode* (maybe [])."""
        return self.transplants.get(rnode) or []

    def set(self, lnode, rnode):
        """Record that *rnode* was transplanted as local node *lnode*."""
        list = self.transplants.setdefault(rnode, [])
        list.append(transplantentry(lnode, rnode))
        self.dirty = True

    def remove(self, transplant):
        """Forget a previously recorded transplant entry."""
        list = self.transplants.get(transplant.rnode)
        if list:
            del list[list.index(transplant)]
            self.dirty = True
101 101
102 102 class transplanter(object):
103 103 def __init__(self, ui, repo, opts):
104 104 self.ui = ui
105 105 self.path = repo.join('transplant')
106 self.opener = scmutil.opener(self.path)
106 self.opener = scmutil.vfs(self.path)
107 107 self.transplants = transplants(self.path, 'transplants',
108 108 opener=self.opener)
109 109 def getcommiteditor():
110 110 editform = cmdutil.mergeeditform(repo[None], 'transplant')
111 111 return cmdutil.getcommiteditor(editform=editform, **opts)
112 112 self.getcommiteditor = getcommiteditor
113 113
114 114 def applied(self, repo, node, parent):
115 115 '''returns True if a node is already an ancestor of parent
116 116 or is parent or has already been transplanted'''
117 117 if hasnode(repo, parent):
118 118 parentrev = repo.changelog.rev(parent)
119 119 if hasnode(repo, node):
120 120 rev = repo.changelog.rev(node)
121 121 reachable = repo.changelog.ancestors([parentrev], rev,
122 122 inclusive=True)
123 123 if rev in reachable:
124 124 return True
125 125 for t in self.transplants.get(node):
126 126 # it might have been stripped
127 127 if not hasnode(repo, t.lnode):
128 128 self.transplants.remove(t)
129 129 return False
130 130 lnoderev = repo.changelog.rev(t.lnode)
131 131 if lnoderev in repo.changelog.ancestors([parentrev], lnoderev,
132 132 inclusive=True):
133 133 return True
134 134 return False
135 135
136 136 def apply(self, repo, source, revmap, merges, opts=None):
137 137 '''apply the revisions in revmap one by one in revision order'''
138 138 if opts is None:
139 139 opts = {}
140 140 revs = sorted(revmap)
141 141 p1, p2 = repo.dirstate.parents()
142 142 pulls = []
143 143 diffopts = patch.difffeatureopts(self.ui, opts)
144 144 diffopts.git = True
145 145
146 146 lock = tr = None
147 147 try:
148 148 lock = repo.lock()
149 149 tr = repo.transaction('transplant')
150 150 for rev in revs:
151 151 node = revmap[rev]
152 152 revstr = '%s:%s' % (rev, nodemod.short(node))
153 153
154 154 if self.applied(repo, node, p1):
155 155 self.ui.warn(_('skipping already applied revision %s\n') %
156 156 revstr)
157 157 continue
158 158
159 159 parents = source.changelog.parents(node)
160 160 if not (opts.get('filter') or opts.get('log')):
161 161 # If the changeset parent is the same as the
162 162 # wdir's parent, just pull it.
163 163 if parents[0] == p1:
164 164 pulls.append(node)
165 165 p1 = node
166 166 continue
167 167 if pulls:
168 168 if source != repo:
169 169 exchange.pull(repo, source.peer(), heads=pulls)
170 170 merge.update(repo, pulls[-1], False, False)
171 171 p1, p2 = repo.dirstate.parents()
172 172 pulls = []
173 173
174 174 domerge = False
175 175 if node in merges:
176 176 # pulling all the merge revs at once would mean we
177 177 # couldn't transplant after the latest even if
178 178 # transplants before them fail.
179 179 domerge = True
180 180 if not hasnode(repo, node):
181 181 exchange.pull(repo, source.peer(), heads=[node])
182 182
183 183 skipmerge = False
184 184 if parents[1] != revlog.nullid:
185 185 if not opts.get('parent'):
186 186 self.ui.note(_('skipping merge changeset %s:%s\n')
187 187 % (rev, nodemod.short(node)))
188 188 skipmerge = True
189 189 else:
190 190 parent = source.lookup(opts['parent'])
191 191 if parent not in parents:
192 192 raise error.Abort(_('%s is not a parent of %s') %
193 193 (nodemod.short(parent),
194 194 nodemod.short(node)))
195 195 else:
196 196 parent = parents[0]
197 197
198 198 if skipmerge:
199 199 patchfile = None
200 200 else:
201 201 fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
202 202 fp = os.fdopen(fd, pycompat.sysstr('w'))
203 203 gen = patch.diff(source, parent, node, opts=diffopts)
204 204 for chunk in gen:
205 205 fp.write(chunk)
206 206 fp.close()
207 207
208 208 del revmap[rev]
209 209 if patchfile or domerge:
210 210 try:
211 211 try:
212 212 n = self.applyone(repo, node,
213 213 source.changelog.read(node),
214 214 patchfile, merge=domerge,
215 215 log=opts.get('log'),
216 216 filter=opts.get('filter'))
217 217 except TransplantError:
218 218 # Do not rollback, it is up to the user to
219 219 # fix the merge or cancel everything
220 220 tr.close()
221 221 raise
222 222 if n and domerge:
223 223 self.ui.status(_('%s merged at %s\n') % (revstr,
224 224 nodemod.short(n)))
225 225 elif n:
226 226 self.ui.status(_('%s transplanted to %s\n')
227 227 % (nodemod.short(node),
228 228 nodemod.short(n)))
229 229 finally:
230 230 if patchfile:
231 231 os.unlink(patchfile)
232 232 tr.close()
233 233 if pulls:
234 234 exchange.pull(repo, source.peer(), heads=pulls)
235 235 merge.update(repo, pulls[-1], False, False)
236 236 finally:
237 237 self.saveseries(revmap, merges)
238 238 self.transplants.write()
239 239 if tr:
240 240 tr.release()
241 241 if lock:
242 242 lock.release()
243 243
244 244 def filter(self, filter, node, changelog, patchfile):
245 245 '''arbitrarily rewrite changeset before applying it'''
246 246
247 247 self.ui.status(_('filtering %s\n') % patchfile)
248 248 user, date, msg = (changelog[1], changelog[2], changelog[4])
249 249 fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
250 250 fp = os.fdopen(fd, pycompat.sysstr('w'))
251 251 fp.write("# HG changeset patch\n")
252 252 fp.write("# User %s\n" % user)
253 253 fp.write("# Date %d %d\n" % date)
254 254 fp.write(msg + '\n')
255 255 fp.close()
256 256
257 257 try:
258 258 self.ui.system('%s %s %s' % (filter, util.shellquote(headerfile),
259 259 util.shellquote(patchfile)),
260 260 environ={'HGUSER': changelog[1],
261 261 'HGREVISION': nodemod.hex(node),
262 262 },
263 263 onerr=error.Abort, errprefix=_('filter failed'),
264 264 blockedtag='transplant_filter')
265 265 user, date, msg = self.parselog(file(headerfile))[1:4]
266 266 finally:
267 267 os.unlink(headerfile)
268 268
269 269 return (user, date, msg)
270 270
271 271 def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
272 272 filter=None):
273 273 '''apply the patch in patchfile to the repository as a transplant'''
274 274 (manifest, user, (time, timezone), files, message) = cl[:5]
275 275 date = "%d %d" % (time, timezone)
276 276 extra = {'transplant_source': node}
277 277 if filter:
278 278 (user, date, message) = self.filter(filter, node, cl, patchfile)
279 279
280 280 if log:
281 281 # we don't translate messages inserted into commits
282 282 message += '\n(transplanted from %s)' % nodemod.hex(node)
283 283
284 284 self.ui.status(_('applying %s\n') % nodemod.short(node))
285 285 self.ui.note('%s %s\n%s\n' % (user, date, message))
286 286
287 287 if not patchfile and not merge:
288 288 raise error.Abort(_('can only omit patchfile if merging'))
289 289 if patchfile:
290 290 try:
291 291 files = set()
292 292 patch.patch(self.ui, repo, patchfile, files=files, eolmode=None)
293 293 files = list(files)
294 294 except Exception as inst:
295 295 seriespath = os.path.join(self.path, 'series')
296 296 if os.path.exists(seriespath):
297 297 os.unlink(seriespath)
298 298 p1 = repo.dirstate.p1()
299 299 p2 = node
300 300 self.log(user, date, message, p1, p2, merge=merge)
301 301 self.ui.write(str(inst) + '\n')
302 302 raise TransplantError(_('fix up the working directory and run '
303 303 'hg transplant --continue'))
304 304 else:
305 305 files = None
306 306 if merge:
307 307 p1, p2 = repo.dirstate.parents()
308 308 repo.setparents(p1, node)
309 309 m = match.always(repo.root, '')
310 310 else:
311 311 m = match.exact(repo.root, '', files)
312 312
313 313 n = repo.commit(message, user, date, extra=extra, match=m,
314 314 editor=self.getcommiteditor())
315 315 if not n:
316 316 self.ui.warn(_('skipping emptied changeset %s\n') %
317 317 nodemod.short(node))
318 318 return None
319 319 if not merge:
320 320 self.transplants.set(n, node)
321 321
322 322 return n
323 323
324 324 def canresume(self):
325 325 return os.path.exists(os.path.join(self.path, 'journal'))
326 326
327 327 def resume(self, repo, source, opts):
328 328 '''recover last transaction and apply remaining changesets'''
329 329 if os.path.exists(os.path.join(self.path, 'journal')):
330 330 n, node = self.recover(repo, source, opts)
331 331 if n:
332 332 self.ui.status(_('%s transplanted as %s\n') %
333 333 (nodemod.short(node),
334 334 nodemod.short(n)))
335 335 else:
336 336 self.ui.status(_('%s skipped due to empty diff\n')
337 337 % (nodemod.short(node),))
338 338 seriespath = os.path.join(self.path, 'series')
339 339 if not os.path.exists(seriespath):
340 340 self.transplants.write()
341 341 return
342 342 nodes, merges = self.readseries()
343 343 revmap = {}
344 344 for n in nodes:
345 345 revmap[source.changelog.rev(n)] = n
346 346 os.unlink(seriespath)
347 347
348 348 self.apply(repo, source, revmap, merges, opts)
349 349
350 350 def recover(self, repo, source, opts):
351 351 '''commit working directory using journal metadata'''
352 352 node, user, date, message, parents = self.readlog()
353 353 merge = False
354 354
355 355 if not user or not date or not message or not parents[0]:
356 356 raise error.Abort(_('transplant log file is corrupt'))
357 357
358 358 parent = parents[0]
359 359 if len(parents) > 1:
360 360 if opts.get('parent'):
361 361 parent = source.lookup(opts['parent'])
362 362 if parent not in parents:
363 363 raise error.Abort(_('%s is not a parent of %s') %
364 364 (nodemod.short(parent),
365 365 nodemod.short(node)))
366 366 else:
367 367 merge = True
368 368
369 369 extra = {'transplant_source': node}
370 370 try:
371 371 p1, p2 = repo.dirstate.parents()
372 372 if p1 != parent:
373 373 raise error.Abort(_('working directory not at transplant '
374 374 'parent %s') % nodemod.hex(parent))
375 375 if merge:
376 376 repo.setparents(p1, parents[1])
377 377 modified, added, removed, deleted = repo.status()[:4]
378 378 if merge or modified or added or removed or deleted:
379 379 n = repo.commit(message, user, date, extra=extra,
380 380 editor=self.getcommiteditor())
381 381 if not n:
382 382 raise error.Abort(_('commit failed'))
383 383 if not merge:
384 384 self.transplants.set(n, node)
385 385 else:
386 386 n = None
387 387 self.unlog()
388 388
389 389 return n, node
390 390 finally:
391 391 # TODO: get rid of this meaningless try/finally enclosing.
392 392 # this is kept only to reduce changes in a patch.
393 393 pass
394 394
395 395 def readseries(self):
396 396 nodes = []
397 397 merges = []
398 398 cur = nodes
399 399 for line in self.opener.read('series').splitlines():
400 400 if line.startswith('# Merges'):
401 401 cur = merges
402 402 continue
403 403 cur.append(revlog.bin(line))
404 404
405 405 return (nodes, merges)
406 406
407 407 def saveseries(self, revmap, merges):
408 408 if not revmap:
409 409 return
410 410
411 411 if not os.path.isdir(self.path):
412 412 os.mkdir(self.path)
413 413 series = self.opener('series', 'w')
414 414 for rev in sorted(revmap):
415 415 series.write(nodemod.hex(revmap[rev]) + '\n')
416 416 if merges:
417 417 series.write('# Merges\n')
418 418 for m in merges:
419 419 series.write(nodemod.hex(m) + '\n')
420 420 series.close()
421 421
422 422 def parselog(self, fp):
423 423 parents = []
424 424 message = []
425 425 node = revlog.nullid
426 426 inmsg = False
427 427 user = None
428 428 date = None
429 429 for line in fp.read().splitlines():
430 430 if inmsg:
431 431 message.append(line)
432 432 elif line.startswith('# User '):
433 433 user = line[7:]
434 434 elif line.startswith('# Date '):
435 435 date = line[7:]
436 436 elif line.startswith('# Node ID '):
437 437 node = revlog.bin(line[10:])
438 438 elif line.startswith('# Parent '):
439 439 parents.append(revlog.bin(line[9:]))
440 440 elif not line.startswith('# '):
441 441 inmsg = True
442 442 message.append(line)
443 443 if None in (user, date):
444 444 raise error.Abort(_("filter corrupted changeset (no user or date)"))
445 445 return (node, user, date, '\n'.join(message), parents)
446 446
447 447 def log(self, user, date, message, p1, p2, merge=False):
448 448 '''journal changelog metadata for later recover'''
449 449
450 450 if not os.path.isdir(self.path):
451 451 os.mkdir(self.path)
452 452 fp = self.opener('journal', 'w')
453 453 fp.write('# User %s\n' % user)
454 454 fp.write('# Date %s\n' % date)
455 455 fp.write('# Node ID %s\n' % nodemod.hex(p2))
456 456 fp.write('# Parent ' + nodemod.hex(p1) + '\n')
457 457 if merge:
458 458 fp.write('# Parent ' + nodemod.hex(p2) + '\n')
459 459 fp.write(message.rstrip() + '\n')
460 460 fp.close()
461 461
462 462 def readlog(self):
463 463 return self.parselog(self.opener('journal'))
464 464
465 465 def unlog(self):
466 466 '''remove changelog journal'''
467 467 absdst = os.path.join(self.path, 'journal')
468 468 if os.path.exists(absdst):
469 469 os.unlink(absdst)
470 470
471 471 def transplantfilter(self, repo, source, root):
472 472 def matchfn(node):
473 473 if self.applied(repo, node, root):
474 474 return False
475 475 if source.changelog.parents(node)[1] != revlog.nullid:
476 476 return False
477 477 extra = source.changelog.read(node)[5]
478 478 cnode = extra.get('transplant_source')
479 479 if cnode and self.applied(repo, cnode, root):
480 480 return False
481 481 return True
482 482
483 483 return matchfn
484 484
def hasnode(repo, node):
    '''return True when node is known to repo's changelog'''
    try:
        rev = repo.changelog.rev(node)
    except error.RevlogError:
        return False
    return rev is not None
490 490
def browserevs(ui, repo, nodes, opts):
    '''interactively transplant changesets

    Shows each candidate changeset in turn and prompts for an action.
    Returns (transplants, merges): nodes selected for plain transplant
    and nodes to be transplanted as merges; both are empty sequences
    when the user quits.'''
    displayer = cmdutil.show_changeset(ui, repo, opts)
    transplants = []
    merges = []
    prompt = _('apply changeset? [ynmpcq?]:'
               '$$ &yes, transplant this changeset'
               '$$ &no, skip this changeset'
               '$$ &merge at this changeset'
               '$$ show &patch'
               '$$ &commit selected changesets'
               '$$ &quit and cancel transplant'
               '$$ &? (show this help)')
    for node in nodes:
        displayer.show(repo[node])
        action = None
        # '?' (help) and 'p' (show patch) loop back to the prompt;
        # any other answer falls through to be handled below
        while not action:
            action = 'ynmpcq?'[ui.promptchoice(prompt)]
            if action == '?':
                for c, t in ui.extractchoices(prompt)[1]:
                    ui.write('%s: %s\n' % (c, t))
                action = None
            elif action == 'p':
                parent = repo.changelog.parents(node)[0]
                for chunk in patch.diff(repo, parent, node):
                    ui.write(chunk)
                action = None
        if action == 'y':
            transplants.append(node)
        elif action == 'm':
            merges.append(node)
        elif action == 'c':
            # stop browsing and commit what was selected so far
            break
        elif action == 'q':
            # quit: discard every selection
            transplants = ()
            merges = ()
            break
    displayer.close()
    return (transplants, merges)
530 530
@command('transplant',
    [('s', 'source', '', _('transplant changesets from REPO'), _('REPO')),
    ('b', 'branch', [], _('use this source changeset as head'), _('REV')),
    ('a', 'all', None, _('pull all changesets up to the --branch revisions')),
    ('p', 'prune', [], _('skip over REV'), _('REV')),
    ('m', 'merge', [], _('merge at REV'), _('REV')),
    ('', 'parent', '',
     _('parent to choose when transplanting merge'), _('REV')),
    ('e', 'edit', False, _('invoke editor on commit messages')),
    ('', 'log', None, _('append transplant info to log message')),
    ('c', 'continue', None, _('continue last transplant session '
                              'after fixing conflicts')),
    ('', 'filter', '',
     _('filter changesets through command'), _('CMD'))],
    _('hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
      '[-m REV] [REV]...'))
def transplant(ui, repo, *revs, **opts):
    '''transplant changesets from another branch

    Selected changesets will be applied on top of the current working
    directory with the log of the original changeset. The changesets
    are copied and will thus appear twice in the history with different
    identities.

    Consider using the graft command if everything is inside the same
    repository - it will use merges and will usually give a better result.
    Use the rebase extension if the changesets are unpublished and you want
    to move them instead of copying them.

    If --log is specified, log messages will have a comment appended
    of the form::

      (transplanted from CHANGESETHASH)

    You can rewrite the changelog message with the --filter option.
    Its argument will be invoked with the current changelog message as
    $1 and the patch as $2.

    --source/-s specifies another repository to use for selecting changesets,
    just as if it temporarily had been pulled.
    If --branch/-b is specified, these revisions will be used as
    heads when deciding which changesets to transplant, just as if only
    these revisions had been pulled.
    If --all/-a is specified, all the revisions up to the heads specified
    with --branch will be transplanted.

    Example:

    - transplant all changes up to REV on top of your current revision::

        hg transplant --branch REV --all

    You can optionally mark selected transplanted changesets as merge
    changesets. You will not be prompted to transplant any ancestors
    of a merged transplant, and you can merge descendants of them
    normally instead of transplanting them.

    Merge changesets may be transplanted directly by specifying the
    proper parent changeset by calling :hg:`transplant --parent`.

    If no merges or revisions are provided, :hg:`transplant` will
    start an interactive changeset browser.

    If a changeset application fails, you can fix the merge by hand
    and then resume where you left off by calling :hg:`transplant
    --continue/-c`.
    '''
    # transplanting modifies the working directory, so hold the working
    # directory lock for the entire session
    with repo.wlock():
        return _dotransplant(ui, repo, *revs, **opts)
600 600
def _dotransplant(ui, repo, *revs, **opts):
    '''implementation of the transplant command; caller holds the wlock'''
    def incwalk(repo, csets, match=util.always):
        # yield the incoming changesets (from a remote source) that pass
        # the match predicate
        for node in csets:
            if match(node):
                yield node

    def transplantwalk(repo, dest, heads, match=util.always):
        '''Yield all nodes that are ancestors of a head but not ancestors
        of dest.
        If no heads are specified, the heads of repo will be used.'''
        if not heads:
            heads = repo.heads()
        ancestors = []
        ctx = repo[dest]
        for head in heads:
            ancestors.append(ctx.ancestor(repo[head]).node())
        for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
            if match(node):
                yield node

    def checkopts(opts, revs):
        # reject mutually-incompatible option combinations up front
        if opts.get('continue'):
            if opts.get('branch') or opts.get('all') or opts.get('merge'):
                raise error.Abort(_('--continue is incompatible with '
                                   '--branch, --all and --merge'))
            return
        if not (opts.get('source') or revs or
                opts.get('merge') or opts.get('branch')):
            raise error.Abort(_('no source URL, branch revision, or revision '
                               'list provided'))
        if opts.get('all'):
            if not opts.get('branch'):
                raise error.Abort(_('--all requires a branch revision'))
            if revs:
                raise error.Abort(_('--all is incompatible with a '
                                   'revision list'))

    checkopts(opts, revs)

    if not opts.get('log'):
        # deprecated config: transplant.log
        opts['log'] = ui.config('transplant', 'log')
    if not opts.get('filter'):
        # deprecated config: transplant.filter
        opts['filter'] = ui.config('transplant', 'filter')

    tp = transplanter(ui, repo, opts)

    p1, p2 = repo.dirstate.parents()
    if len(repo) > 0 and p1 == revlog.nullid:
        raise error.Abort(_('no revision checked out'))
    if opts.get('continue'):
        if not tp.canresume():
            raise error.Abort(_('no transplant to continue'))
    else:
        # a fresh session requires a clean working directory
        cmdutil.checkunfinished(repo)
        if p2 != revlog.nullid:
            raise error.Abort(_('outstanding uncommitted merges'))
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            raise error.Abort(_('outstanding local changes'))

    sourcerepo = opts.get('source')
    if sourcerepo:
        # pull the candidate changesets into a temporary bundle repo;
        # cleanupfn removes it when we are done
        peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
        heads = map(peer.lookup, opts.get('branch', ()))
        target = set(heads)
        for r in revs:
            try:
                target.add(peer.lookup(r))
            except error.RepoError:
                pass
        source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer,
                                    onlyheads=sorted(target), force=True)
    else:
        source = repo
        heads = map(source.lookup, opts.get('branch', ()))
        cleanupfn = None

    try:
        if opts.get('continue'):
            tp.resume(repo, source, opts)
            return

        tf = tp.transplantfilter(repo, source, p1)
        if opts.get('prune'):
            prune = set(source.lookup(r)
                        for r in scmutil.revrange(source, opts.get('prune')))
            matchfn = lambda x: tf(x) and x not in prune
        else:
            matchfn = tf
        merges = map(source.lookup, opts.get('merge', ()))
        revmap = {}
        if revs:
            # explicit revisions on the command line
            for r in scmutil.revrange(source, revs):
                revmap[int(r)] = source.lookup(r)
        elif opts.get('all') or not merges:
            # walk candidates; --all takes everything, otherwise browse
            # interactively
            if source != repo:
                alltransplants = incwalk(source, csets, match=matchfn)
            else:
                alltransplants = transplantwalk(source, p1, heads,
                                                match=matchfn)
            if opts.get('all'):
                revs = alltransplants
            else:
                revs, newmerges = browserevs(ui, source, alltransplants, opts)
                merges.extend(newmerges)
        for r in revs:
            revmap[source.changelog.rev(r)] = r
        for r in merges:
            revmap[source.changelog.rev(r)] = r

        tp.apply(repo, source, revmap, merges, opts)
    finally:
        if cleanupfn:
            cleanupfn()
717 717
revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('transplanted([set])')
def revsettransplanted(repo, subset, x):
    """Transplanted changesets in set, or all transplanted changesets.
    """
    # no argument means "filter the whole subset"
    s = revset.getset(repo, subset, x) if x else subset
    transplanted = [r for r in s
                    if repo[r].extra().get('transplant_source')]
    return smartset.baseset(transplanted)
730 730
templatekeyword = registrar.templatekeyword()

@templatekeyword('transplanted')
def kwtransplanted(repo, ctx, **args):
    """String. The node identifier of the transplanted
    changeset if any."""
    n = ctx.extra().get('transplant_source')
    if n:
        return nodemod.hex(n)
    return ''
739 739
def extsetup(ui):
    # register the transplant journal file so other commands warn about
    # (and 'hg update' can abort) an in-progress transplant
    cmdutil.unfinishedstates.append(
        ['transplant/journal', True, False, _('transplant in progress'),
         _("use 'hg transplant --continue' or 'hg update' to abort")])

# tell hggettext to extract docstrings from these functions:
i18nfunctions = [revsettransplanted, kwtransplanted]
@@ -1,340 +1,340 b''
1 1 # archival.py - revision archival for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import gzip
11 11 import os
12 12 import struct
13 13 import tarfile
14 14 import time
15 15 import zipfile
16 16 import zlib
17 17
18 18 from .i18n import _
19 19
20 20 from . import (
21 21 cmdutil,
22 22 encoding,
23 23 error,
24 24 match as matchmod,
25 25 scmutil,
26 26 util,
27 27 )
28 28 stringio = util.stringio
29 29
30 30 # from unzip source code:
31 31 _UNX_IFREG = 0x8000
32 32 _UNX_IFLNK = 0xa000
33 33
def tidyprefix(dest, kind, prefix):
    '''choose prefix to use for names in archive. make sure prefix is
    safe for consumers.'''

    if not prefix:
        # derive a prefix from the archive file name, minus any known
        # extension for this archive kind
        if not isinstance(dest, str):
            raise ValueError('dest must be string if no prefix')
        prefix = os.path.basename(dest)
        lower = prefix.lower()
        for sfx in exts.get(kind, []):
            if lower.endswith(sfx):
                prefix = prefix[:-len(sfx)]
                break
    else:
        prefix = util.normpath(prefix)
    lpfx = os.path.normpath(util.localpath(prefix))
    prefix = util.pconvert(lpfx)
    if not prefix.endswith('/'):
        prefix += '/'
    # Drop the leading '.' path component if present, so Windows can read the
    # zip files (issue4634)
    if prefix.startswith('./'):
        prefix = prefix[2:]
    if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
        raise error.Abort(_('archive prefix contains illegal components'))
    return prefix
60 60
# archive kind -> list of destination-name extensions implying that kind
exts = {
    'tar': ['.tar'],
    'tbz2': ['.tbz2', '.tar.bz2'],
    'tgz': ['.tgz', '.tar.gz'],
    'zip': ['.zip'],
    }
67 67
def guesskind(dest):
    '''map an archive destination name to an archive kind, or None'''
    for kind, extensions in exts.iteritems():
        for ext in extensions:
            if dest.endswith(ext):
                return kind
    return None
73 73
def _rootctx(repo):
    '''return the context of the first visible revision (repo[0] may be
    hidden), or the null context for an empty repository'''
    for rev in iter(repo):
        # first iteration yields the lowest visible revision
        return repo[rev]
    return repo['null']
79 79
def buildmetadata(ctx):
    '''build content of .hg_archival.txt'''
    repo = ctx.repo()
    hex = ctx.hex()
    if ctx.rev() is None:
        # working-directory context: describe its first parent, with a
        # '+' suffix when there are local modifications
        hex = ctx.p1().hex()
        if ctx.dirty():
            hex += '+'

    base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
        _rootctx(repo).hex(), hex, encoding.fromlocal(ctx.branch()))

    tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                   if repo.tagtype(t) == 'global')
    if not tags:
        # no global tag on this revision: fall back to the latest tag
        # reachable from it, rendered through the template engine into a
        # captured ui buffer
        repo.ui.pushbuffer()
        opts = {'template': '{latesttag}\n{latesttagdistance}\n'
                '{changessincelatesttag}',
                'style': '', 'patch': None, 'git': None}
        cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
        ltags, dist, changessince = repo.ui.popbuffer().split('\n')
        # {latesttag} may yield several ':'-separated tags
        ltags = ltags.split(':')
        tags = ''.join('latesttag: %s\n' % t for t in ltags)
        tags += 'latesttagdistance: %s\n' % dist
        tags += 'changessincelatesttag: %s\n' % changessince

    return base + tags
107 107
class tarit(object):
    '''write archive to tar file or stream. can write uncompressed,
    or compress with gzip or bzip2.'''

    class GzipFileWithTime(gzip.GzipFile):
        # GzipFile variant stamping the archive with a caller-supplied
        # mtime instead of the current time, for reproducible archives

        def __init__(self, *args, **kw):
            timestamp = None
            if 'timestamp' in kw:
                timestamp = kw.pop('timestamp')
            if timestamp is None:
                self.timestamp = time.time()
            else:
                self.timestamp = timestamp
            gzip.GzipFile.__init__(self, *args, **kw)

        def _write_gzip_header(self):
            # hand-rolled gzip header (RFC 1952 layout) so the mtime
            # field comes from self.timestamp rather than time.time()
            self.fileobj.write('\037\213') # magic header
            self.fileobj.write('\010') # compression method
            fname = self.name
            if fname and fname.endswith('.gz'):
                fname = fname[:-3]
            flags = 0
            if fname:
                flags = gzip.FNAME
            self.fileobj.write(chr(flags))
            gzip.write32u(self.fileobj, long(self.timestamp))
            self.fileobj.write('\002')
            self.fileobj.write('\377')
            if fname:
                self.fileobj.write(fname + '\000')

    def __init__(self, dest, mtime, kind=''):
        # dest: file name (str) or file-like object
        # kind: '' (plain tar), 'gz' or 'bz2'
        self.mtime = mtime
        self.fileobj = None

        def taropen(mode, name='', fileobj=None):
            if kind == 'gz':
                mode = mode[0]
                if not fileobj:
                    fileobj = open(name, mode + 'b')
                # wrap the raw file in our timestamp-aware gzip stream
                gzfileobj = self.GzipFileWithTime(name, mode + 'b',
                                                  zlib.Z_BEST_COMPRESSION,
                                                  fileobj, timestamp=mtime)
                self.fileobj = gzfileobj
                return tarfile.TarFile.taropen(name, mode, gzfileobj)
            else:
                return tarfile.open(name, mode + kind, fileobj)

        if isinstance(dest, str):
            self.z = taropen('w:', name=dest)
        else:
            # writing to a stream (e.g. an HTTP response)
            self.z = taropen('w|', fileobj=dest)

    def addfile(self, name, mode, islink, data):
        i = tarfile.TarInfo(name)
        i.mtime = self.mtime
        i.size = len(data)
        if islink:
            # symlinks carry their target in the header, not as data
            i.type = tarfile.SYMTYPE
            i.mode = 0o777
            i.linkname = data
            data = None
            i.size = 0
        else:
            i.mode = mode
            data = stringio(data)
        self.z.addfile(i, data)

    def done(self):
        self.z.close()
        if self.fileobj:
            self.fileobj.close()
181 181
class tellable(object):
    '''provide tell method for zipfile.ZipFile when writing to http
    response file object.'''

    def __init__(self, fp):
        # wrapped stream plus a running count of bytes written
        self.fp = fp
        self.offset = 0

    def __getattr__(self, key):
        # everything except write/tell is delegated to the wrapped file
        return getattr(self.fp, key)

    def write(self, s):
        self.fp.write(s)
        self.offset = self.offset + len(s)

    def tell(self):
        # report our own byte count; the wrapped stream cannot seek/tell
        return self.offset
199 199
class zipit(object):
    '''write archive to zip file or stream. can write uncompressed,
    or compressed with deflate.'''

    def __init__(self, dest, mtime, compress=True):
        if not isinstance(dest, str):
            try:
                dest.tell()
            except (AttributeError, IOError):
                # stream without a working tell() (e.g. HTTP response):
                # wrap it so ZipFile can track offsets
                dest = tellable(dest)
        self.z = zipfile.ZipFile(dest, 'w',
                                 compress and zipfile.ZIP_DEFLATED or
                                 zipfile.ZIP_STORED)

        # Python's zipfile module emits deprecation warnings if we try
        # to store files with a date before 1980.
        epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
        if mtime < epoch:
            mtime = epoch

        self.mtime = mtime
        self.date_time = time.gmtime(mtime)[:6]

    def addfile(self, name, mode, islink, data):
        i = zipfile.ZipInfo(name, self.date_time)
        i.compress_type = self.z.compression
        # unzip will not honor unix file modes unless file creator is
        # set to unix (id 3).
        i.create_system = 3
        ftype = _UNX_IFREG
        if islink:
            mode = 0o777
            ftype = _UNX_IFLNK
        # high 16 bits of external_attr hold the unix mode and file type
        i.external_attr = (mode | ftype) << 16
        # add "extended-timestamp" extra block, because zip archives
        # without this will be extracted with unexpected timestamp,
        # if TZ is not configured as GMT
        i.extra += struct.pack('<hhBl',
                               0x5455, # block type: "extended-timestamp"
                               1 + 4, # size of this block
                               1, # "modification time is present"
                               int(self.mtime)) # last modification (UTC)
        self.z.writestr(i, data)

    def done(self):
        self.z.close()
246 246
class fileit(object):
    '''write archive as files in directory.'''

    def __init__(self, name, mtime):
        # mtime is accepted for interface parity with the other
        # archivers but not used: extracted files keep their write time
        self.basedir = name
        self.opener = scmutil.vfs(self.basedir)

    def addfile(self, name, mode, islink, data):
        if islink:
            self.opener.symlink(data, name)
            return
        f = self.opener(name, "w", atomictemp=True)
        f.write(data)
        f.close()
        destfile = os.path.join(self.basedir, name)
        # apply the archived mode on top of the vfs default permissions
        os.chmod(destfile, mode)

    def done(self):
        # plain files need no finalization
        pass
266 266
# archive kind -> archiver factory taking (dest, mtime)
archivers = {
    'files': fileit,
    'tar': tarit,
    'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
    'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
    'uzip': lambda name, mtime: zipit(name, mtime, False),
    'zip': zipit,
    }
275 275
def archive(repo, dest, node, kind, decode=True, matchfn=None,
            prefix='', mtime=None, subrepos=False):
    '''create archive of repo as it was at node.

    dest can be name of directory, name of archive file, or file
    object to write archive to.

    kind is type of archive to create.

    decode tells whether to put files through decode filters from
    hgrc.

    matchfn is function to filter names of files to write to archive.

    prefix is name of path to put before every archive member.

    Returns the number of members written.  Raises error.Abort for an
    unknown kind, a prefix on a plain-files archive, or when nothing
    matches.'''

    if kind == 'files':
        if prefix:
            raise error.Abort(_('cannot give prefix when archiving to files'))
    else:
        prefix = tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    if kind not in archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]
    archiver = archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        # metadata file goes first, subject to the same matcher
        name = '.hg_archival.txt'
        if not matchfn or matchfn(name):
            write(name, 0o644, False, lambda: buildmetadata(ctx))

    if matchfn:
        files = [f for f in ctx.manifest().keys() if matchfn(f)]
    else:
        files = ctx.manifest().keys()
    total = len(files)
    if total:
        files.sort()
        repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
        for i, f in enumerate(files):
            ff = ctx.flags(f)
            # 'x' flag -> executable mode, 'l' flag -> symlink
            write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data)
            repo.ui.progress(_('archiving'), i + 1, item=f,
                             unit=_('files'), total=total)
        repo.ui.progress(_('archiving'), None)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, matchfn)
            total += sub.archive(archiver, prefix, submatch, decode)

    if total == 0:
        raise error.Abort(_('no files match the archive pattern'))

    archiver.done()
    return total
@@ -1,3470 +1,3470 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import tempfile
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 )
23 23
24 24 from . import (
25 25 bookmarks,
26 26 changelog,
27 27 copies,
28 28 crecord as crecordmod,
29 29 encoding,
30 30 error,
31 31 formatter,
32 32 graphmod,
33 33 lock as lockmod,
34 34 match as matchmod,
35 35 obsolete,
36 36 patch,
37 37 pathutil,
38 38 phases,
39 39 pycompat,
40 40 repair,
41 41 revlog,
42 42 revset,
43 43 scmutil,
44 44 smartset,
45 45 templatekw,
46 46 templater,
47 47 util,
48 48 )
49 49 stringio = util.stringio
50 50
51 51 # special string such that everything below this line will be ignored in the
52 52 # editor text
53 53 _linebelow = "^HG: ------------------------ >8 ------------------------$"
54 54
def ishunk(x):
    '''return True if x is a patch hunk (curses or plain record flavor)'''
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
58 58
def newandmodified(chunks, originalchunks):
    '''return the set of file names whose hunks in chunks create new
    files and were not present in originalchunks'''
    return set(chunk.header.filename()
               for chunk in chunks
               if ishunk(chunk) and chunk.header.isnewfile()
               and chunk not in originalchunks)
66 66
def parsealiases(cmd):
    '''return the list of alias names from a command-table key,
    dropping the leading "default command" caret marker(s)'''
    name = cmd.lstrip("^")
    return name.split("|")
69 69
def setupwrapcolorwrite(ui):
    '''replace ui.write with a wrapper that labels/colorizes diff output;
    return the original ui.write so the caller can restore it'''
    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        label = kw.pop('label', '')
        # difflabel yields (chunk, label) pairs for the diff text
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write
    def wrap(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)
    setattr(ui, 'write', wrap)
    # caller is responsible for restoring ui.write to the returned value
    return oldwrite
82 82
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    '''dispatch hunk filtering to the curses UI or the plain-text one'''
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        # tests drive the chunk selector from a script instead of curses
        recordfn = crecordmod.testdecorator(testfile,
                                            crecordmod.testchunkselector)
    else:
        recordfn = crecordmod.chunkselector

    return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
95 95
def recordfilter(ui, originalhunks, operation=None):
    """Interactively let the user select a subset of originalhunks.

    Returns (chunks, opts).  *operation* names what the caller is doing
    (reverting, committing, shelving, ...) so prompts can mention it
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest', None)
    # temporarily wrap ui.write so diff output is labeled/colorized
    oldwrite = setupwrapcolorwrite(ui)
    try:
        chunks, opts = filterchunks(ui, originalhunks, usecurses,
                                    testfile, operation)
    finally:
        # always restore the original ui.write
        ui.write = oldwrite
    return chunks, opts
112 112
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
            filterfn, *pats, **opts):
    '''interactively select changes, then delegate the commit to
    commitfunc.  filterfn chooses the hunks (e.g. recordfilter);
    cmdsuggest, if set, names the command to suggest when the ui is
    non-interactive; backupall forces backing up every changed file.'''
    from . import merge as mergemod
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # build a git-style diff of the working directory so hunks can
        # be parsed and offered for selection
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # assemble the selected hunks into one patch
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        extra={"suffix": ".diff"},
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this is racy as an editor could notice
                    # the file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # the working directory is modified; serialize under the wlock
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
294 294
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    normal = {}
    debug = {}

    # an exact table key wins outright ("log" alias beats "^log|history")
    keys = [cmd] if cmd in table else table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        match = None
        if cmd in aliases:
            match = cmd
        elif not strict:
            # non-strict mode also accepts unambiguous prefixes
            for alias in aliases:
                if alias.startswith(cmd):
                    match = alias
                    break
        if match is None:
            continue
        isdebug = aliases[0].startswith("debug") or match.startswith("debug")
        bucket = debug if isdebug else normal
        bucket[match] = (aliases, table[entry])

    # surface debug commands only when nothing else matched
    choice = normal if normal or not debug else debug
    return choice, allcmds
332 332
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string.

    Raises error.AmbiguousCommand when several commands match and
    error.UnknownCommand when none does.
    """
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # sorted(choice) instead of keys()+sort(): dict.keys() is only a
        # sortable list on Python 2; sorted() works on any mapping/version
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        # same rationale: values() is a view on Python 3, not indexable
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
349 349
def findrepo(p):
    """Walk up from directory 'p' looking for one that contains '.hg'.

    Returns the repository root, or None when no ancestor holds a repo.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        prev, p = p, os.path.dirname(p)
        if p == prev:
            # reached the filesystem root without finding a repository
            return None
    return p
357 357
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    # first four status fields: modified, added, removed, deleted
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'), hint=hint)
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        # recurse so a dirty subrepo also aborts the operation
        wctx.sub(subpath).bailifchanged(hint=hint)
375 375
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if message or not logfile:
        # -m wins, or neither option given: hand back whatever we have
        return message
    # only --logfile given: read the message from the file ('-' is stdin)
    try:
        if logfile == '-':
            return ui.fin.read()
        return '\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(_("can't read commit message '%s': %s") %
                          (logfile, inst.strerror))
394 394
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a changeset with two parents is a merge
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
411 411
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def editor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return editor
    if editform:
        def editor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return editor
    return commiteditor
442 442
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    raw = opts.get('limit')
    if not raw:
        # option absent (or empty/zero): no limit
        return None
    try:
        limit = int(raw)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
456 456
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand the '%'-escapes in output-filename pattern 'pat'.

    Each escape is only registered when the corresponding argument was
    supplied; an unknown escape raises Abort. '%%' always yields '%'.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }
    if node:
        expander.update({
            'H': lambda: hex(node),
            'R': lambda: str(repo.changelog.rev(node)),
            'h': lambda: short(node),
            'm': lambda: re.sub('[^\w]', '_', str(desc)),
            'r': lambda: str(repo.changelog.rev(node)).zfill(revwidth or 0),
        })
    if total is not None:
        expander['N'] = lambda: str(total)
    if seqno is not None:
        expander['n'] = lambda: str(seqno)
    if total is not None and seqno is not None:
        # with both known, zero-pad the sequence number to the total's width
        expander['n'] = lambda: str(seqno).zfill(len(str(total)))
    if pathname is not None:
        expander['s'] = lambda: os.path.basename(pathname)
        expander['d'] = lambda: os.path.dirname(pathname) or '.'
        expander['p'] = lambda: pathname

    try:
        pieces = []
        i, end = 0, len(pat)
        while i < end:
            ch = pat[i]
            if ch == '%':
                i += 1
                ch = expander[pat[i]]()
            pieces.append(ch)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
502 502
503 503 class _unclosablefile(object):
504 504 def __init__(self, fp):
505 505 self._fp = fp
506 506
507 507 def close(self):
508 508 pass
509 509
510 510 def __iter__(self):
511 511 return iter(self._fp)
512 512
513 513 def __getattr__(self, attr):
514 514 return getattr(self._fp, attr)
515 515
516 516 def __enter__(self):
517 517 return self
518 518
519 519 def __exit__(self, exc_type, exc_value, exc_tb):
520 520 pass
521 521
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return a file object for the destination described by 'pat'.

    An empty pattern or '-' maps to the ui's stdio wrapped so it cannot
    be closed; an already-open file-like 'pat' is returned unchanged;
    anything else is expanded via makefilename() and opened with 'mode'
    (possibly overridden per-file through 'modemap').
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        # stdio: never let the caller close the ui's streams
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)
    if writable and util.safehasattr(pat, 'write'):
        return pat
    if 'r' in mode and util.safehasattr(pat, 'read'):
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first writer truncates; subsequent writers append
            modemap[fn] = 'ab'
    return open(fn, mode)
544 544
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']

    # reject contradictory or incomplete option combinations up front
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if r:
        return r

    # fall back to opening a revlog straight off the filesystem
    if not file_:
        raise error.CommandError(cmd, _('invalid arguments'))
    if not os.path.isfile(file_):
        raise error.Abort(_("revlog '%s' not found") % file_)
    return revlog.revlog(scmutil.vfs(pycompat.getcwd(), audit=False),
                         file_[:-2] + ".i")
589 589
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, when rename=True, rename) working-directory files.

    Returns True when at least one file could not be copied.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, used to detect target collisions
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about and skipping unmanaged/removed files
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform (or record, with --after) a single copy/rename;
        # returns a truthy value on failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            # --after: only record, the target must already exist
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # double rename to change only the case of the name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        # compute target paths for a normal (not --after) copy
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        # compute target paths for --after: guess how much of the source
        # path prefix is reflected under dest by scoring existing files
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
835 835
## facility to let extension process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' hooks are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' hooks are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
856 856
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, had-rejects bool)
    tuple.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # patch.extract found nothing to apply
        return (None, None, False)

    rejects = False

    try:
        # commit message precedence: -m/-l option > patch header > editor
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply in the working directory (default mode)
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                try:
                    if partial:
                        # a partial import must be committable even if no
                        # hunk applied cleanly
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                    for idfunc in extrapostimport:
                        extrapostimportmap[idfunc](repo[n])
                finally:
                    repo.ui.restoreconfig(allowemptyback)
        else:
            # --bypass: build the changeset in memory, leaving the working
            # directory untouched
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            user,
                                            date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1026 1026
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# the function has to return a string to be added to the header, or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1034 1034
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    'revs' are the revisions to export, 'template' names the output file
    (expanded per revision via makefileobj) unless an open 'fp' is given,
    'switch_parent' diffs against the second parent of merges, and
    'match'/'opts' are forwarded to the diff machinery.
    '''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])
    # shared across revisions so a repeated filename switches to append mode
    filemode = {}

    def single(rev, seqno, fp):
        # export one revision to fp (opening a file from 'template' if
        # fp is not provided)
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] # Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            shouldclose = True
        if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        # patch header: metadata lines prefixed with '# '
        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent  %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent  %s\n" % hex(parents[1]))

        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1099 1099
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    Writes to 'fp' when given, otherwise to the ui. 'stat' selects the
    diffstat summary instead of the full diff; 'root' restricts the diff
    to paths under that (repo-relative) directory.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat needs no context lines, only the hunk boundaries
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1156 1156
1157 1157 def _changesetlabels(ctx):
1158 1158 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1159 1159 if ctx.troubled():
1160 1160 labels.append('changeset.troubled')
1161 1161 for trouble in ctx.troubles():
1162 1162 labels.append('trouble.%s' % trouble)
1163 1163 return ' '.join(labels)
1164 1164
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # rev -> rendered header text; only populated in buffered mode
        self.header = {}
        # rev -> rendered changeset body; only populated in buffered mode
        self.hunk = {}
        # last header written, to avoid repeating identical headers
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        """write out anything buffered for ctx

        Returns 1 if a buffered hunk was written, 0 otherwise.
        """
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """render ctx, either into the internal buffer (retrieved later
        via flush()) or straight to the ui"""
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        # NOTE(review): several literals below ("changeset: %d:%s\n" etc.)
        # look like their column-alignment padding was collapsed by an
        # intermediate rendering - verify spacing against upstream
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label=_changesetlabels(ctx))

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifestlog._revlog.rev(mnode),
                           hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if ctx.troubled():
            # i18n: column positioning for "hg log"
            self.ui.write(_("trouble: %s\n") % ', '.join(ctx.troubles()),
                          label='log.trouble')

        if self.ui.debugflag:
            # status relative to first parent: (modified, added, removed)
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        """write diffstat and/or diff for ctx against its first parent,
        depending on the 'stat' and 'patch' diffopts"""
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")
1340 1340
class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # True until the first entry is written; controls whether the
        # opening '[' or a separating ',' is emitted
        self._first = True

    def close(self):
        # terminate the JSON array opened by the first _show(), or emit
        # an empty array when nothing was shown at all
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        # the working directory context has rev None -> JSON null
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write(('\n "rev": %s') % jrev)
            self.ui.write((',\n "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n "rev": %s') % jrev)
        self.ui.write((',\n "node": %s') % jnode)
        self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n "manifest": %s') % jmanifestnode)

            self.ui.write((',\n "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # status against first parent: (modified, added, removed, ...)
            files = ctx.p1().status(ctx)
            self.ui.write((',\n "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write((',\n "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # render the diffstat into a buffer so it can be escaped
                # into a single JSON string value
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1439 1439
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # a template string and a map file are mutually exclusive inputs
        assert not (tmpl and mapfile)
        defaulttempl = templatekw.defaulttempl
        if mapfile:
            self.t = templater.templater.frommapfile(mapfile,
                                                     cache=defaulttempl)
        else:
            self.t = formatter.maketemplater(ui, 'changeset', tmpl,
                                             cache=defaulttempl)

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        # later entries in tmplmodes take precedence, so the most
        # specific enabled mode ends up selecting each part
        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts['changeset']
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        # the footer is rendered only once, from the first shown changeset
        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1514 1514
def gettemplate(ui, tmpl, style):
    """Resolve a template spec or style name into a (tmpl, mapfile) pair.

    At most one element of the returned pair is non-None. When neither
    an explicit template/style nor any ui configuration provides one,
    (None, None) is returned and the caller falls back to the default
    display.
    """
    if not tmpl and not style:
        # nothing given explicitly: consult the ui settings, where
        # 'logtemplate' is stronger than 'style'
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            return templater.unquotestring(tmpl), None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        # a bare style name (no directory part) refers to a bundled
        # map-cmdline.<name> file, or to a template file by that name
        mapfile = style
        if not os.path.split(mapfile)[0]:
            located = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if located:
                mapfile = located
        return None, mapfile

    if not tmpl:
        return None, None

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1541 1541
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a matcher is only needed when a diff or diffstat will be shown
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)
    else:
        matchfn = None

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
    if tmpl or mapfile:
        return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                   buffered)
    return changeset_printer(ui, repo, matchfn, opts, buffered)
1567 1567
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('precnode', '%s ', hex(marker.precnode()))
    successors = marker.succnodes()
    fm.condwrite(successors, 'succnodes', '%s ',
                 fm.formatlist(map(hex, successors), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parentnodes), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the date is shown separately above, so drop it from the metadata
    metadata = marker.metadata().copy()
    metadata.pop('date', None)
    fm.write('metadata', '{%s}',
             fm.formatdict(metadata, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1588 1588
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    matched = {}

    def prep(ctx, fns):
        # record every changeset whose timestamp satisfies the date spec
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    # walkchangerevs calls prep() on each windowed revision before
    # yielding it, so matched is populated by the time we test a rev
    for ctx in walkchangerevs(repo, matcher, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return str(rev)

    raise error.Abort(_("revision matching date not found"))
1609 1609
def increasingwindows(windowsize=8, sizelimit=512):
    """Generate an endless sequence of window sizes.

    The size starts at ``windowsize`` and doubles after each yield
    until it reaches ``sizelimit``, after which it stays constant.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size = size * 2
1615 1615
class FileWalkError(Exception):
    """Raised by walkfilerevs() when the history cannot be walked using
    filelogs alone, telling the caller to fall back to the slow path."""
    pass
1618 1618
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    As a side effect, fncache is filled with rev -> [filename] entries
    for the revisions added to the returned set.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yields (filename, filenode-or-None); copies discovered while
        # walking earlier files are appended and walked too
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1715 1715
class _followfilter(object):
    """Stateful predicate deciding whether revisions belong to the
    follow graph anchored at the first revision fed to match().

    Revisions must be fed to match() in a monotonic order relative to
    the anchor (all later, or all earlier); roots tracks the frontier
    of the walk."""

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # with onlyfirst, only the first parent is considered;
            # otherwise null parents are filtered out
            parents = self.repo.changelog.parentrevs(rev)
            if self.onlyfirst:
                return parents[0:1]
            return [p for p in parents if p != nullrev]

        if self.startrev == nullrev:
            # the first revision seen anchors the walk
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: rev matches when one of its parents already does
            if not self.roots:
                self.roots.add(self.startrev)
            if any(p in self.roots for p in realparents(rev)):
                self.roots.add(rev)
                return True
        else:
            # backwards: rev matches when it is a known ancestor; it is
            # then replaced by its own parents in the frontier
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1753 1753
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        # drop the pruned rev and all its ancestors down to the bottom
        # of the requested range (wanted supports '-' with a list here)
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # prepare() is called in forward (ascending) order for the
            # whole window before any context is yielded
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1891 1891
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "filesbyrev". It is populated by reproducing
    # the graph traversal already done by the --follow revset and
    # relating revs to file names (which is not "correct" but good
    # enough).
    filesbyrev = {}
    populated = [False]
    pctx = repo['.']

    def _populate():
        # map each ancestor revision of the followed files to the
        # paths those files had at that revision
        for fn in files:
            fctx = pctx[fn]
            filesbyrev.setdefault(fctx.introrev(), set()).add(fctx.path())
            for anc in fctx.ancestors(followfirst=followfirst):
                filesbyrev.setdefault(anc.rev(), set()).add(anc.path())

    def filematcher(rev):
        # fill the cache lazily, on first use only - building it walks
        # file history and may be expensive
        if not populated[0]:
            populated[0] = True
            _populate()
        return scmutil.matchfiles(repo, filesbyrev.get(rev, []))

    return filematcher
1919 1919
1920 1920 def _makenofollowlogfilematcher(repo, pats, opts):
1921 1921 '''hook for extensions to override the filematcher for non-follow cases'''
1922 1922 return None
1923 1923
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.

    revs is expected to be non-empty; its first element anchors the
    --follow behavior.
    """
    # option name -> (revset expression template, joiner for list values)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    # copy so the synthetic '_'-prefixed keys added below do not leak
    # back to the caller
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # combine all applicable option revsets with 'and'
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2075 2075
def _logrevs(repo, opts):
    """Return the default set of revisions for log-like commands.

    The default --rev value depends on --follow, but the --follow
    behavior itself depends on the revisions resolved from --rev...
    """
    following = bool(opts.get('follow') or opts.get('follow_first'))
    if opts.get('rev'):
        # the user named an explicit revision set
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # working directory parent is null: nothing to follow
            return smartset.baseset()
        return repo.revs('reverse(:.)')
    # neither --rev nor --follow: all revisions, newest first
    revs = smartset.spanset(repo)
    revs.reverse()
    return revs
2090 2090
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    # User-specified revs might be unsorted, but don't sort before
    # _makelogrevset because it might depend on the order of revs
    if (opts.get('rev')
        and not (revs.isdescending() or revs.istopo())):
        revs.sort(reverse=True)
    if expr:
        rsmatcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = rsmatcher(repo, revs)
    if limit is not None:
        # keep only the first 'limit' revisions
        firstrevs = []
        for rev in revs:
            if len(firstrevs) >= limit:
                break
            firstrevs.append(rev)
        revs = smartset.baseset(firstrevs)

    return revs, expr, filematcher
2121 2121
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        rsmatcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = rsmatcher(repo, revs)
    if limit is not None:
        # keep only the first 'limit' revisions
        firstrevs = []
        for rev in revs:
            if len(firstrevs) >= limit:
                break
            firstrevs.append(rev)
        revs = smartset.baseset(firstrevs)

    return revs, expr, filematcher
2147 2147
def _graphnodeformatter(ui, displayer):
    """Return a callable (repo, ctx) -> str that renders the graph node
    character for a changeset.

    When ui.graphnodetemplate is unset, the stock {graphnode} keyword is
    returned directly as a fast path.
    """
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode # fast path for "{graphnode}"

    templ = formatter.gettemplater(ui, 'graphnode', spec)
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache # reuse cache of slow templates
    else:
        cache = {}
    props = templatekw.keywords.copy()
    props.update({'templ': templ, 'cache': cache})

    def formatnode(repo, ctx):
        # refresh the per-revision properties before each evaluation
        props.update({'ctx': ctx,
                      'repo': repo,
                      'ui': repo.ui,
                      'revcache': {}})
        return templater.stringify(templ('graphnode', **props))

    return formatnode
2167 2167
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Render a revision DAG as an ASCII graph through *displayer*.

    ``dag`` yields (rev, type, ctx, parents) tuples; ``edgefn`` (e.g.
    graphmod.asciiedges) converts each entry into drawable edge data.
    ``getrenamed``, when given, is used to annotate copies/renames, and
    ``filematcher``, when given, maps a rev to a match object restricting
    --patch/--stat output.
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

        # experimental config: experimental.graphshorten
        state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # ctx.rev() is falsy for rev 0/None; renames are only looked up
        # for the other revisions
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        # displayer buffered the rendered text in hunk[rev]; pull it back
        # out so it can be interleaved with the graph drawing
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        # note: the loop variables type/char/lines are deliberately
        # rebound here by edgefn's output
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2215 2215
def graphlog(ui, repo, *pats, **opts):
    """Show revision history alongside an ASCII revision graph."""
    # Parameters are identical to log command ones
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # bound the rename lookup by the highest requested revision
        endrev = None
        userrevs = opts.get('rev')
        if userrevs:
            endrev = scmutil.revrange(repo, userrevs).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    ui.pager('log')
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2232 2232
def checkunsupportedgraphflags(pats, opts):
    """Abort if any option incompatible with -G/--graph is enabled."""
    unsupported = ("newest_first",)
    for name in unsupported:
        if opts.get(name):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % name.replace("_", "-"))
2238 2238
def graphrevs(repo, nodes, opts):
    """Reverse *nodes* in place and yield at most --limit of them as a DAG."""
    cutoff = loglimit(opts)
    nodes.reverse()
    shown = nodes if cutoff is None else nodes[:cutoff]
    return graphmod.nodes(repo, shown)
2245 2245
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matched by *match* for addition at the next commit.

    ``prefix`` is the path prefix used when recursing into subrepos and
    when joining names for messages.  With ``explicitonly``, only files
    named exactly (not via patterns) are added.  Returns the list of
    file names that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # collect names rejected by the matcher while still reporting them
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # flag Windows-unfriendly / case-colliding names as files are added
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    # recurse into subrepositories; with --subrepos their add is
    # non-explicit-only, otherwise explicit-only
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2288 2288
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by *match* without deleting them.

    Returns a (bad, forgot) pair: names that could not be forgotten and
    names that were.  With ``explicitonly``, only exactly-named files
    are forgotten.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # collect names rejected by the matcher while still reporting them
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # modified + added + deleted + clean, i.e. every tracked match
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2336 2336
def files(ui, ctx, m, fm, fmt, subrepos):
    """List files of *ctx* matched by *m* through formatter *fm*.

    ``fmt`` is the format string used for each path.  With ``subrepos``,
    recurse into all subrepositories (exactly-named ones are always
    entered).  Returns 0 if at least one file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # for the working directory (rev None), skip files marked removed
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2366 2366
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Remove files matched by *m* from tracking and (usually) from disk.

    ``after`` records deletions already made by the user; ``force``
    removes even modified/added files; ``subrepos`` recurses into all
    subrepositories.  ``warnings`` is an accumulator list: when None, a
    fresh list is created and its contents printed before returning
    (top-level call); when provided, warnings are appended for the
    caller to emit (recursive subrepo call).  Returns 1 if any problem
    was encountered, else 0.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        # tracked files, directories, '.' and subrepo paths are fine
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
            # missing files will generate a warning elsewhere
            ret = 1
    ui.progress(_('deleting'), None)

    if force:
        list = modified + deleted + clean + added
    elif after:
        # only record deletions the user already performed
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2484 2484
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write the contents of the matched files of *ctx* to the output
    destination selected by opts['output'].

    Recurses into subrepositories.  Returns 0 if at least one file was
    written, 1 otherwise.
    """
    err = 1

    def write(path):
        # open the destination (file template or stdout) for this path
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            # apply decode filters (e.g. EOL conversion)
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                write(file)
                return 0
        except KeyError:
            pass

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2527 2527
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        # normalize the user-supplied date before handing it on
        opts['date'] = util.parsedate(rawdate)
    msg = logmessage(ui, opts)
    m = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove') and scmutil.addremove(repo, m, "", opts) != 0:
        raise error.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, msg, m, opts)
2544 2544
def samefile(f, ctx1, ctx2):
    """Report whether file *f* is identical (content and flags) in the
    two changesets, treating absence from both manifests as identical."""
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if in1 != in2:
        # present on only one side: definitely different
        return False
    if not in1:
        # absent from both manifests
        return True
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    # filectx.cmp() returns True when the contents differ
    return not a.cmp(b) and a.flags() == b.flags()
2556 2556
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset *old*, folding in any working-directory changes.

    A temporary commit of the dirty working directory (if any) is made
    first, then a replacement changeset is committed on *old*'s parent
    combining both.  Returns the node of the amended changeset (which is
    old's node when nothing changed).  Depending on configuration, the
    old changesets are either obsoleted or stripped.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                # deactivate the bookmark so the temporary commit does not
                # move it
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                # NOTE(review): these two assignments appear to be dead
                # stores -- they look unconditionally overwritten by the
                # user/date assignments after this if/else; confirm before
                # removing.
                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # 'old.p2' is a bound method and therefore always truthy;
                # the intended test is whether a second parent actually
                # exists, so compare its node against nullid.
                if old.p2().node() != nullid:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())
                files = [f for f in files if not samefile(f, ctx, base)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                # no -m/-l: edit the old description instead
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            #commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid
2739 2739
def commiteditor(repo, ctx, subs, editform=''):
    """Return the commit message for *ctx*, launching an editor only
    when the changeset does not already carry a description."""
    description = ctx.description()
    if description:
        return description
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2745 2745
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Run the user's editor to obtain a commit message for *ctx*.

    ``finishdesc``, if given, post-processes the edited text.
    ``extramsg`` replaces the default "Leave message empty to abort"
    hint.  With ``unchangedmessagedetection``, abort when the user saved
    the template unmodified.  Raises error.Abort on an empty message.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # look up the most specific committemplate.* config for this editform,
    # e.g. 'changeset.commit.amend' before 'changeset.commit' before
    # 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path)
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop the "HG:" helper lines added by buildcommittext/templates
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2795 2795
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit-message editor text for *ctx* from template
    *tmpl* (a committemplate.* configuration value)."""
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    displayer = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)

    # every committemplate.* key except 'changeset' is an auxiliary
    # template definition referenced from the main one
    for key, value in ui.configitems('committemplate'):
        if key != 'changeset':
            displayer.t.cache[key] = value

    extramsg = extramsg or '' # ensure that extramsg is a string

    ui.pushbuffer()
    displayer.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2812 2812
def hgprefix(msg):
    """Prefix each non-empty line of *msg* with "HG: ", dropping empty
    lines."""
    prefixed = []
    for line in msg.split("\n"):
        if line:
            prefixed.append("HG: %s" % line)
    return "\n".join(prefixed)
2815 2815
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default plain-text commit message skeleton shown in the
    editor: current description (if any) followed by "HG:" helper lines
    describing the pending commit."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(hgprefix(_("Enter commit message."
                            " Lines beginning with 'HG:' are removed.")))
    lines.append(hgprefix(extramsg))
    lines.append("HG: --")
    lines.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_("branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    lines.extend(hgprefix(_("subrepo %s") % s) for s in subs)
    lines.extend(hgprefix(_("added %s") % f) for f in added)
    lines.extend(hgprefix(_("changed %s") % f) for f in modified)
    lines.extend(hgprefix(_("removed %s") % f) for f in removed)
    if not (added or modified or removed):
        lines.append(hgprefix(_("no files changed")))
    lines.append("")

    return "\n".join(lines)
2843 2843
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Report on a freshly created commit *node*: "created new head",
    "reopening closed branch head" and (with --verbose/--debug) the
    committed changeset id.

    ``bheads`` is the list of branch head nodes before the commit.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    # a new head was created iff the node is not among the branch heads
    # and no parent was a head of the same branch
    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2891 2891
2892 2892 def postcommitstatus(repo, pats, opts):
2893 2893 return repo.status(match=scmutil.match(repo[None], pats, opts))
2894 2894
2895 2895 def revert(ui, repo, ctx, parents, *pats, **opts):
2896 2896 parent, p2 = parents
2897 2897 node = ctx.node()
2898 2898
2899 2899 mf = ctx.manifest()
2900 2900 if node == p2:
2901 2901 parent = p2
2902 2902
2903 2903 # need all matching names in dirstate and manifest of target rev,
2904 2904 # so have to walk both. do not print errors if files exist in one
2905 2905 # but not other. in both cases, filesets should be evaluated against
2906 2906 # workingctx to get consistent result (issue4497). this means 'set:**'
2907 2907 # cannot be used to select missing files from target rev.
2908 2908
2909 2909 # `names` is a mapping for all elements in working copy and target revision
2910 2910 # The mapping is in the form:
2911 2911 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2912 2912 names = {}
2913 2913
2914 2914 with repo.wlock():
2915 2915 ## filling of the `names` mapping
2916 2916 # walk dirstate to fill `names`
2917 2917
2918 2918 interactive = opts.get('interactive', False)
2919 2919 wctx = repo[None]
2920 2920 m = scmutil.match(wctx, pats, opts)
2921 2921
2922 2922 # we'll need this later
2923 2923 targetsubs = sorted(s for s in wctx.substate if m(s))
2924 2924
2925 2925 if not m.always():
2926 2926 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2927 2927 names[abs] = m.rel(abs), m.exact(abs)
2928 2928
2929 2929 # walk target manifest to fill `names`
2930 2930
2931 2931 def badfn(path, msg):
2932 2932 if path in names:
2933 2933 return
2934 2934 if path in ctx.substate:
2935 2935 return
2936 2936 path_ = path + '/'
2937 2937 for f in names:
2938 2938 if f.startswith(path_):
2939 2939 return
2940 2940 ui.warn("%s: %s\n" % (m.rel(path), msg))
2941 2941
2942 2942 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2943 2943 if abs not in names:
2944 2944 names[abs] = m.rel(abs), m.exact(abs)
2945 2945
2946 2946 # Find status of all file in `names`.
2947 2947 m = scmutil.matchfiles(repo, names)
2948 2948
2949 2949 changes = repo.status(node1=node, match=m,
2950 2950 unknown=True, ignored=True, clean=True)
2951 2951 else:
2952 2952 changes = repo.status(node1=node, match=m)
2953 2953 for kind in changes:
2954 2954 for abs in kind:
2955 2955 names[abs] = m.rel(abs), m.exact(abs)
2956 2956
2957 2957 m = scmutil.matchfiles(repo, names)
2958 2958
2959 2959 modified = set(changes.modified)
2960 2960 added = set(changes.added)
2961 2961 removed = set(changes.removed)
2962 2962 _deleted = set(changes.deleted)
2963 2963 unknown = set(changes.unknown)
2964 2964 unknown.update(changes.ignored)
2965 2965 clean = set(changes.clean)
2966 2966 modadded = set()
2967 2967
2968 2968 # We need to account for the state of the file in the dirstate,
2969 2969 # even when we revert against something else than parent. This will
2970 2970 # slightly alter the behavior of revert (doing back up or not, delete
2971 2971 # or just forget etc).
2972 2972 if parent == node:
2973 2973 dsmodified = modified
2974 2974 dsadded = added
2975 2975 dsremoved = removed
2976 2976 # store all local modifications, useful later for rename detection
2977 2977 localchanges = dsmodified | dsadded
2978 2978 modified, added, removed = set(), set(), set()
2979 2979 else:
2980 2980 changes = repo.status(node1=parent, match=m)
2981 2981 dsmodified = set(changes.modified)
2982 2982 dsadded = set(changes.added)
2983 2983 dsremoved = set(changes.removed)
2984 2984 # store all local modifications, useful later for rename detection
2985 2985 localchanges = dsmodified | dsadded
2986 2986
2987 2987 # only take into account for removes between wc and target
2988 2988 clean |= dsremoved - removed
2989 2989 dsremoved &= removed
2990 2990 # distinct between dirstate remove and other
2991 2991 removed -= dsremoved
2992 2992
2993 2993 modadded = added & dsmodified
2994 2994 added -= modadded
2995 2995
2996 2996 # tell newly modified apart.
2997 2997 dsmodified &= modified
2998 2998 dsmodified |= modified & dsadded # dirstate added may need backup
2999 2999 modified -= dsmodified
3000 3000
3001 3001 # We need to wait for some post-processing to update this set
3002 3002 # before making the distinction. The dirstate will be used for
3003 3003 # that purpose.
3004 3004 dsadded = added
3005 3005
3006 3006 # in case of merge, files that are actually added can be reported as
3007 3007 # modified, we need to post process the result
3008 3008 if p2 != nullid:
3009 3009 mergeadd = set(dsmodified)
3010 3010 for path in dsmodified:
3011 3011 if path in mf:
3012 3012 mergeadd.remove(path)
3013 3013 dsadded |= mergeadd
3014 3014 dsmodified -= mergeadd
3015 3015
3016 3016 # if f is a rename, update `names` to also revert the source
3017 3017 cwd = repo.getcwd()
3018 3018 for f in localchanges:
3019 3019 src = repo.dirstate.copied(f)
3020 3020 # XXX should we check for rename down to target node?
3021 3021 if src and src not in names and repo.dirstate[src] == 'r':
3022 3022 dsremoved.add(src)
3023 3023 names[src] = (repo.pathto(src, cwd), True)
3024 3024
3025 3025 # determine the exact nature of the deleted changesets
3026 3026 deladded = set(_deleted)
3027 3027 for path in _deleted:
3028 3028 if path in mf:
3029 3029 deladded.remove(path)
3030 3030 deleted = _deleted - deladded
3031 3031
3032 3032 # distinguish between file to forget and the other
3033 3033 added = set()
3034 3034 for abs in dsadded:
3035 3035 if repo.dirstate[abs] != 'a':
3036 3036 added.add(abs)
3037 3037 dsadded -= added
3038 3038
3039 3039 for abs in deladded:
3040 3040 if repo.dirstate[abs] == 'a':
3041 3041 dsadded.add(abs)
3042 3042 deladded -= dsadded
3043 3043
3044 3044 # For files marked as removed, we check if an unknown file is present at
3045 3045 # the same path. If a such file exists it may need to be backed up.
3046 3046 # Making the distinction at this stage helps have simpler backup
3047 3047 # logic.
3048 3048 removunk = set()
3049 3049 for abs in removed:
3050 3050 target = repo.wjoin(abs)
3051 3051 if os.path.lexists(target):
3052 3052 removunk.add(abs)
3053 3053 removed -= removunk
3054 3054
3055 3055 dsremovunk = set()
3056 3056 for abs in dsremoved:
3057 3057 target = repo.wjoin(abs)
3058 3058 if os.path.lexists(target):
3059 3059 dsremovunk.add(abs)
3060 3060 dsremoved -= dsremovunk
3061 3061
3062 3062 # action to be actually performed by revert
3063 3063 # (<list of file>, message>) tuple
3064 3064 actions = {'revert': ([], _('reverting %s\n')),
3065 3065 'add': ([], _('adding %s\n')),
3066 3066 'remove': ([], _('removing %s\n')),
3067 3067 'drop': ([], _('removing %s\n')),
3068 3068 'forget': ([], _('forgetting %s\n')),
3069 3069 'undelete': ([], _('undeleting %s\n')),
3070 3070 'noop': (None, _('no changes needed to %s\n')),
3071 3071 'unknown': (None, _('file not managed: %s\n')),
3072 3072 }
3073 3073
3074 3074 # "constant" that convey the backup strategy.
3075 3075 # All set to `discard` if `no-backup` is set do avoid checking
3076 3076 # no_backup lower in the code.
3077 3077 # These values are ordered for comparison purposes
3078 3078 backupinteractive = 3 # do backup if interactively modified
3079 3079 backup = 2 # unconditionally do backup
3080 3080 check = 1 # check if the existing file differs from target
3081 3081 discard = 0 # never do backup
3082 3082 if opts.get('no_backup'):
3083 3083 backupinteractive = backup = check = discard
3084 3084 if interactive:
3085 3085 dsmodifiedbackup = backupinteractive
3086 3086 else:
3087 3087 dsmodifiedbackup = backup
3088 3088 tobackup = set()
3089 3089
3090 3090 backupanddel = actions['remove']
3091 3091 if not opts.get('no_backup'):
3092 3092 backupanddel = actions['drop']
3093 3093
3094 3094 disptable = (
3095 3095 # dispatch table:
3096 3096 # file state
3097 3097 # action
3098 3098 # make backup
3099 3099
3100 3100 ## Sets that results that will change file on disk
3101 3101 # Modified compared to target, no local change
3102 3102 (modified, actions['revert'], discard),
3103 3103 # Modified compared to target, but local file is deleted
3104 3104 (deleted, actions['revert'], discard),
3105 3105 # Modified compared to target, local change
3106 3106 (dsmodified, actions['revert'], dsmodifiedbackup),
3107 3107 # Added since target
3108 3108 (added, actions['remove'], discard),
3109 3109 # Added in working directory
3110 3110 (dsadded, actions['forget'], discard),
3111 3111 # Added since target, have local modification
3112 3112 (modadded, backupanddel, backup),
3113 3113 # Added since target but file is missing in working directory
3114 3114 (deladded, actions['drop'], discard),
3115 3115 # Removed since target, before working copy parent
3116 3116 (removed, actions['add'], discard),
3117 3117 # Same as `removed` but an unknown file exists at the same path
3118 3118 (removunk, actions['add'], check),
3119 3119 # Removed since targe, marked as such in working copy parent
3120 3120 (dsremoved, actions['undelete'], discard),
3121 3121 # Same as `dsremoved` but an unknown file exists at the same path
3122 3122 (dsremovunk, actions['undelete'], check),
3123 3123 ## the following sets does not result in any file changes
3124 3124 # File with no modification
3125 3125 (clean, actions['noop'], discard),
3126 3126 # Existing file, not tracked anywhere
3127 3127 (unknown, actions['unknown'], discard),
3128 3128 )
3129 3129
3130 3130 for abs, (rel, exact) in sorted(names.items()):
3131 3131 # target file to be touch on disk (relative to cwd)
3132 3132 target = repo.wjoin(abs)
3133 3133 # search the entry in the dispatch table.
3134 3134 # if the file is in any of these sets, it was touched in the working
3135 3135 # directory parent and we are sure it needs to be reverted.
3136 3136 for table, (xlist, msg), dobackup in disptable:
3137 3137 if abs not in table:
3138 3138 continue
3139 3139 if xlist is not None:
3140 3140 xlist.append(abs)
3141 3141 if dobackup:
3142 3142 # If in interactive mode, don't automatically create
3143 3143 # .orig files (issue4793)
3144 3144 if dobackup == backupinteractive:
3145 3145 tobackup.add(abs)
3146 3146 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3147 3147 bakname = scmutil.origpath(ui, repo, rel)
3148 3148 ui.note(_('saving current version of %s as %s\n') %
3149 3149 (rel, bakname))
3150 3150 if not opts.get('dry_run'):
3151 3151 if interactive:
3152 3152 util.copyfile(target, bakname)
3153 3153 else:
3154 3154 util.rename(target, bakname)
3155 3155 if ui.verbose or not exact:
3156 3156 if not isinstance(msg, basestring):
3157 3157 msg = msg(abs)
3158 3158 ui.status(msg % rel)
3159 3159 elif exact:
3160 3160 ui.warn(msg % rel)
3161 3161 break
3162 3162
3163 3163 if not opts.get('dry_run'):
3164 3164 needdata = ('revert', 'add', 'undelete')
3165 3165 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3166 3166 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3167 3167
3168 3168 if targetsubs:
3169 3169 # Revert the subrepos on the revert list
3170 3170 for sub in targetsubs:
3171 3171 try:
3172 3172 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3173 3173 except KeyError:
3174 3174 raise error.Abort("subrepository '%s' does not exist in %s!"
3175 3175 % (sub, short(ctx.node())))
3176 3176
3177 3177 def _revertprefetch(repo, ctx, *files):
3178 3178 """Let extension changing the storage layer prefetch content"""
3179 3179 pass
3180 3180
3181 3181 def _performrevert(repo, parents, ctx, actions, interactive=False,
3182 3182 tobackup=None):
3183 3183 """function that actually perform all the actions computed for revert
3184 3184
3185 3185 This is an independent function to let extension to plug in and react to
3186 3186 the imminent revert.
3187 3187
3188 3188 Make sure you have the working directory locked when calling this function.
3189 3189 """
3190 3190 parent, p2 = parents
3191 3191 node = ctx.node()
3192 3192 excluded_files = []
3193 3193 matcher_opts = {"exclude": excluded_files}
3194 3194
3195 3195 def checkout(f):
3196 3196 fc = ctx[f]
3197 3197 repo.wwrite(f, fc.data(), fc.flags())
3198 3198
3199 3199 def doremove(f):
3200 3200 try:
3201 3201 util.unlinkpath(repo.wjoin(f))
3202 3202 except OSError:
3203 3203 pass
3204 3204 repo.dirstate.remove(f)
3205 3205
3206 3206 audit_path = pathutil.pathauditor(repo.root)
3207 3207 for f in actions['forget'][0]:
3208 3208 if interactive:
3209 3209 choice = repo.ui.promptchoice(
3210 3210 _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
3211 3211 if choice == 0:
3212 3212 repo.dirstate.drop(f)
3213 3213 else:
3214 3214 excluded_files.append(repo.wjoin(f))
3215 3215 else:
3216 3216 repo.dirstate.drop(f)
3217 3217 for f in actions['remove'][0]:
3218 3218 audit_path(f)
3219 3219 if interactive:
3220 3220 choice = repo.ui.promptchoice(
3221 3221 _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
3222 3222 if choice == 0:
3223 3223 doremove(f)
3224 3224 else:
3225 3225 excluded_files.append(repo.wjoin(f))
3226 3226 else:
3227 3227 doremove(f)
3228 3228 for f in actions['drop'][0]:
3229 3229 audit_path(f)
3230 3230 repo.dirstate.remove(f)
3231 3231
3232 3232 normal = None
3233 3233 if node == parent:
3234 3234 # We're reverting to our parent. If possible, we'd like status
3235 3235 # to report the file as clean. We have to use normallookup for
3236 3236 # merges to avoid losing information about merged/dirty files.
3237 3237 if p2 != nullid:
3238 3238 normal = repo.dirstate.normallookup
3239 3239 else:
3240 3240 normal = repo.dirstate.normal
3241 3241
3242 3242 newlyaddedandmodifiedfiles = set()
3243 3243 if interactive:
3244 3244 # Prompt the user for changes to revert
3245 3245 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3246 3246 m = scmutil.match(ctx, torevert, matcher_opts)
3247 3247 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3248 3248 diffopts.nodates = True
3249 3249 diffopts.git = True
3250 3250 operation = 'discard'
3251 3251 reversehunks = True
3252 3252 if node != parent:
3253 3253 operation = 'revert'
3254 3254 reversehunks = repo.ui.configbool('experimental',
3255 3255 'revertalternateinteractivemode',
3256 3256 True)
3257 3257 if reversehunks:
3258 3258 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3259 3259 else:
3260 3260 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3261 3261 originalchunks = patch.parsepatch(diff)
3262 3262
3263 3263 try:
3264 3264
3265 3265 chunks, opts = recordfilter(repo.ui, originalchunks,
3266 3266 operation=operation)
3267 3267 if reversehunks:
3268 3268 chunks = patch.reversehunks(chunks)
3269 3269
3270 3270 except patch.PatchError as err:
3271 3271 raise error.Abort(_('error parsing patch: %s') % err)
3272 3272
3273 3273 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3274 3274 if tobackup is None:
3275 3275 tobackup = set()
3276 3276 # Apply changes
3277 3277 fp = stringio()
3278 3278 for c in chunks:
3279 3279 # Create a backup file only if this hunk should be backed up
3280 3280 if ishunk(c) and c.header.filename() in tobackup:
3281 3281 abs = c.header.filename()
3282 3282 target = repo.wjoin(abs)
3283 3283 bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
3284 3284 util.copyfile(target, bakname)
3285 3285 tobackup.remove(abs)
3286 3286 c.write(fp)
3287 3287 dopatch = fp.tell()
3288 3288 fp.seek(0)
3289 3289 if dopatch:
3290 3290 try:
3291 3291 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3292 3292 except patch.PatchError as err:
3293 3293 raise error.Abort(str(err))
3294 3294 del fp
3295 3295 else:
3296 3296 for f in actions['revert'][0]:
3297 3297 checkout(f)
3298 3298 if normal:
3299 3299 normal(f)
3300 3300
3301 3301 for f in actions['add'][0]:
3302 3302 # Don't checkout modified files, they are already created by the diff
3303 3303 if f not in newlyaddedandmodifiedfiles:
3304 3304 checkout(f)
3305 3305 repo.dirstate.add(f)
3306 3306
3307 3307 normal = repo.dirstate.normallookup
3308 3308 if node == parent and p2 == nullid:
3309 3309 normal = repo.dirstate.normal
3310 3310 for f in actions['undelete'][0]:
3311 3311 checkout(f)
3312 3312 normal(f)
3313 3313
3314 3314 copied = copies.pathcopies(repo[parent], ctx)
3315 3315
3316 3316 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3317 3317 if f in copied:
3318 3318 repo.dirstate.copy(copied[f], f)
3319 3319
3320 3320 def command(table):
3321 3321 """Returns a function object to be used as a decorator for making commands.
3322 3322
3323 3323 This function receives a command table as its argument. The table should
3324 3324 be a dict.
3325 3325
3326 3326 The returned function can be used as a decorator for adding commands
3327 3327 to that command table. This function accepts multiple arguments to define
3328 3328 a command.
3329 3329
3330 3330 The first argument is the command name.
3331 3331
3332 3332 The options argument is an iterable of tuples defining command arguments.
3333 3333 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3334 3334
3335 3335 The synopsis argument defines a short, one line summary of how to use the
3336 3336 command. This shows up in the help output.
3337 3337
3338 3338 The norepo argument defines whether the command does not require a
3339 3339 local repository. Most commands operate against a repository, thus the
3340 3340 default is False.
3341 3341
3342 3342 The optionalrepo argument defines whether the command optionally requires
3343 3343 a local repository.
3344 3344
3345 3345 The inferrepo argument defines whether to try to find a repository from the
3346 3346 command line arguments. If True, arguments will be examined for potential
3347 3347 repository locations. See ``findrepo()``. If a repository is found, it
3348 3348 will be used.
3349 3349 """
3350 3350 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3351 3351 inferrepo=False):
3352 3352 def decorator(func):
3353 3353 func.norepo = norepo
3354 3354 func.optionalrepo = optionalrepo
3355 3355 func.inferrepo = inferrepo
3356 3356 if synopsis:
3357 3357 table[name] = func, list(options), synopsis
3358 3358 else:
3359 3359 table[name] = func, list(options)
3360 3360 return func
3361 3361 return decorator
3362 3362
3363 3363 return cmd
3364 3364
3365 3365 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3366 3366 # commands.outgoing. "missing" is "missing" of the result of
3367 3367 # "findcommonoutgoing()"
3368 3368 outgoinghooks = util.hooks()
3369 3369
3370 3370 # a list of (ui, repo) functions called by commands.summary
3371 3371 summaryhooks = util.hooks()
3372 3372
3373 3373 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3374 3374 #
3375 3375 # functions should return tuple of booleans below, if 'changes' is None:
3376 3376 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3377 3377 #
3378 3378 # otherwise, 'changes' is a tuple of tuples below:
3379 3379 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3380 3380 # - (desturl, destbranch, destpeer, outgoing)
3381 3381 summaryremotehooks = util.hooks()
3382 3382
3383 3383 # A list of state files kept by multistep operations like graft.
3384 3384 # Since graft cannot be aborted, it is considered 'clearable' by update.
3385 3385 # note: bisect is intentionally excluded
3386 3386 # (state file, clearable, allowcommit, error, hint)
3387 3387 unfinishedstates = [
3388 3388 ('graftstate', True, False, _('graft in progress'),
3389 3389 _("use 'hg graft --continue' or 'hg update' to abort")),
3390 3390 ('updatestate', True, False, _('last update was interrupted'),
3391 3391 _("use 'hg update' to get a consistent checkout"))
3392 3392 ]
3393 3393
3394 3394 def checkunfinished(repo, commit=False):
3395 3395 '''Look for an unfinished multistep operation, like graft, and abort
3396 3396 if found. It's probably good to check this right before
3397 3397 bailifchanged().
3398 3398 '''
3399 3399 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3400 3400 if commit and allowcommit:
3401 3401 continue
3402 3402 if repo.vfs.exists(f):
3403 3403 raise error.Abort(msg, hint=hint)
3404 3404
3405 3405 def clearunfinished(repo):
3406 3406 '''Check for unfinished operations (as above), and clear the ones
3407 3407 that are clearable.
3408 3408 '''
3409 3409 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3410 3410 if not clearable and repo.vfs.exists(f):
3411 3411 raise error.Abort(msg, hint=hint)
3412 3412 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3413 3413 if clearable and repo.vfs.exists(f):
3414 3414 util.unlink(repo.join(f))
3415 3415
3416 3416 afterresolvedstates = [
3417 3417 ('graftstate',
3418 3418 _('hg graft --continue')),
3419 3419 ]
3420 3420
3421 3421 def howtocontinue(repo):
3422 3422 '''Check for an unfinished operation and return the command to finish
3423 3423 it.
3424 3424
3425 3425 afterresolvedstates tuples define a .hg/{file} and the corresponding
3426 3426 command needed to finish it.
3427 3427
3428 3428 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3429 3429 a boolean.
3430 3430 '''
3431 3431 contmsg = _("continue: %s")
3432 3432 for f, msg in afterresolvedstates:
3433 3433 if repo.vfs.exists(f):
3434 3434 return contmsg % msg, True
3435 3435 workingctx = repo[None]
3436 3436 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3437 3437 for s in workingctx.substate)
3438 3438 if dirty:
3439 3439 return contmsg % _("hg commit"), False
3440 3440 return None, None
3441 3441
3442 3442 def checkafterresolved(repo):
3443 3443 '''Inform the user about the next action after completing hg resolve
3444 3444
3445 3445 If there's a matching afterresolvedstates, howtocontinue will yield
3446 3446 repo.ui.warn as the reporter.
3447 3447
3448 3448 Otherwise, it will yield repo.ui.note.
3449 3449 '''
3450 3450 msg, warning = howtocontinue(repo)
3451 3451 if msg is not None:
3452 3452 if warning:
3453 3453 repo.ui.warn("%s\n" % msg)
3454 3454 else:
3455 3455 repo.ui.note("%s\n" % msg)
3456 3456
3457 3457 def wrongtooltocontinue(repo, task):
3458 3458 '''Raise an abort suggesting how to properly continue if there is an
3459 3459 active task.
3460 3460
3461 3461 Uses howtocontinue() to find the active task.
3462 3462
3463 3463 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3464 3464 a hint.
3465 3465 '''
3466 3466 after = howtocontinue(repo)
3467 3467 hint = None
3468 3468 if after[1]:
3469 3469 hint = after[0]
3470 3470 raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,2110 +1,2110 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import difflib
11 11 import errno
12 12 import operator
13 13 import os
14 14 import random
15 15 import socket
16 16 import string
17 17 import sys
18 18 import tempfile
19 19 import time
20 20
21 21 from .i18n import _
22 22 from .node import (
23 23 bin,
24 24 hex,
25 25 nullhex,
26 26 nullid,
27 27 nullrev,
28 28 short,
29 29 )
30 30 from . import (
31 31 bundle2,
32 32 changegroup,
33 33 cmdutil,
34 34 color,
35 35 commands,
36 36 context,
37 37 dagparser,
38 38 dagutil,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 fileset,
44 44 formatter,
45 45 hg,
46 46 localrepo,
47 47 lock as lockmod,
48 48 merge as mergemod,
49 49 obsolete,
50 50 policy,
51 51 pvec,
52 52 pycompat,
53 53 repair,
54 54 revlog,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 setdiscovery,
59 59 simplemerge,
60 60 smartset,
61 61 sslutil,
62 62 streamclone,
63 63 templater,
64 64 treediscovery,
65 65 util,
66 66 )
67 67
68 68 release = lockmod.release
69 69
70 70 # We reuse the command table from commands because it is easier than
71 71 # teaching dispatch about multiple tables.
72 72 command = cmdutil.command(commands.table)
73 73
74 74 @command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
75 75 def debugancestor(ui, repo, *args):
76 76 """find the ancestor revision of two revisions in a given index"""
77 77 if len(args) == 3:
78 78 index, rev1, rev2 = args
79 r = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False), index)
79 r = revlog.revlog(scmutil.vfs(pycompat.getcwd(), audit=False), index)
80 80 lookup = r.lookup
81 81 elif len(args) == 2:
82 82 if not repo:
83 83 raise error.Abort(_('there is no Mercurial repository here '
84 84 '(.hg not found)'))
85 85 rev1, rev2 = args
86 86 r = repo.changelog
87 87 lookup = repo.lookup
88 88 else:
89 89 raise error.Abort(_('either two or three arguments required'))
90 90 a = r.ancestor(lookup(rev1), lookup(rev2))
91 91 ui.write('%d:%s\n' % (r.rev(a), hex(a)))
92 92
93 93 @command('debugapplystreamclonebundle', [], 'FILE')
94 94 def debugapplystreamclonebundle(ui, repo, fname):
95 95 """apply a stream clone bundle file"""
96 96 f = hg.openpath(ui, fname)
97 97 gen = exchange.readbundle(ui, f, fname)
98 98 gen.apply(repo)
99 99
100 100 @command('debugbuilddag',
101 101 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
102 102 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
103 103 ('n', 'new-file', None, _('add new file at each rev'))],
104 104 _('[OPTION]... [TEXT]'))
105 105 def debugbuilddag(ui, repo, text=None,
106 106 mergeable_file=False,
107 107 overwritten_file=False,
108 108 new_file=False):
109 109 """builds a repo with a given DAG from scratch in the current empty repo
110 110
111 111 The description of the DAG is read from stdin if not given on the
112 112 command line.
113 113
114 114 Elements:
115 115
116 116 - "+n" is a linear run of n nodes based on the current default parent
117 117 - "." is a single node based on the current default parent
118 118 - "$" resets the default parent to null (implied at the start);
119 119 otherwise the default parent is always the last node created
120 120 - "<p" sets the default parent to the backref p
121 121 - "*p" is a fork at parent p, which is a backref
122 122 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
123 123 - "/p2" is a merge of the preceding node and p2
124 124 - ":tag" defines a local tag for the preceding node
125 125 - "@branch" sets the named branch for subsequent nodes
126 126 - "#...\\n" is a comment up to the end of the line
127 127
128 128 Whitespace between the above elements is ignored.
129 129
130 130 A backref is either
131 131
132 132 - a number n, which references the node curr-n, where curr is the current
133 133 node, or
134 134 - the name of a local tag you placed earlier using ":tag", or
135 135 - empty to denote the default parent.
136 136
137 137 All string valued-elements are either strictly alphanumeric, or must
138 138 be enclosed in double quotes ("..."), with "\\" as escape character.
139 139 """
140 140
141 141 if text is None:
142 142 ui.status(_("reading DAG from stdin\n"))
143 143 text = ui.fin.read()
144 144
145 145 cl = repo.changelog
146 146 if len(cl) > 0:
147 147 raise error.Abort(_('repository is not empty'))
148 148
149 149 # determine number of revs in DAG
150 150 total = 0
151 151 for type, data in dagparser.parsedag(text):
152 152 if type == 'n':
153 153 total += 1
154 154
155 155 if mergeable_file:
156 156 linesperrev = 2
157 157 # make a file with k lines per rev
158 158 initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
159 159 initialmergedlines.append("")
160 160
161 161 tags = []
162 162
163 163 wlock = lock = tr = None
164 164 try:
165 165 wlock = repo.wlock()
166 166 lock = repo.lock()
167 167 tr = repo.transaction("builddag")
168 168
169 169 at = -1
170 170 atbranch = 'default'
171 171 nodeids = []
172 172 id = 0
173 173 ui.progress(_('building'), id, unit=_('revisions'), total=total)
174 174 for type, data in dagparser.parsedag(text):
175 175 if type == 'n':
176 176 ui.note(('node %s\n' % str(data)))
177 177 id, ps = data
178 178
179 179 files = []
180 180 fctxs = {}
181 181
182 182 p2 = None
183 183 if mergeable_file:
184 184 fn = "mf"
185 185 p1 = repo[ps[0]]
186 186 if len(ps) > 1:
187 187 p2 = repo[ps[1]]
188 188 pa = p1.ancestor(p2)
189 189 base, local, other = [x[fn].data() for x in (pa, p1,
190 190 p2)]
191 191 m3 = simplemerge.Merge3Text(base, local, other)
192 192 ml = [l.strip() for l in m3.merge_lines()]
193 193 ml.append("")
194 194 elif at > 0:
195 195 ml = p1[fn].data().split("\n")
196 196 else:
197 197 ml = initialmergedlines
198 198 ml[id * linesperrev] += " r%i" % id
199 199 mergedtext = "\n".join(ml)
200 200 files.append(fn)
201 201 fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
202 202
203 203 if overwritten_file:
204 204 fn = "of"
205 205 files.append(fn)
206 206 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
207 207
208 208 if new_file:
209 209 fn = "nf%i" % id
210 210 files.append(fn)
211 211 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
212 212 if len(ps) > 1:
213 213 if not p2:
214 214 p2 = repo[ps[1]]
215 215 for fn in p2:
216 216 if fn.startswith("nf"):
217 217 files.append(fn)
218 218 fctxs[fn] = p2[fn]
219 219
220 220 def fctxfn(repo, cx, path):
221 221 return fctxs.get(path)
222 222
223 223 if len(ps) == 0 or ps[0] < 0:
224 224 pars = [None, None]
225 225 elif len(ps) == 1:
226 226 pars = [nodeids[ps[0]], None]
227 227 else:
228 228 pars = [nodeids[p] for p in ps]
229 229 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
230 230 date=(id, 0),
231 231 user="debugbuilddag",
232 232 extra={'branch': atbranch})
233 233 nodeid = repo.commitctx(cx)
234 234 nodeids.append(nodeid)
235 235 at = id
236 236 elif type == 'l':
237 237 id, name = data
238 238 ui.note(('tag %s\n' % name))
239 239 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
240 240 elif type == 'a':
241 241 ui.note(('branch %s\n' % data))
242 242 atbranch = data
243 243 ui.progress(_('building'), id, unit=_('revisions'), total=total)
244 244 tr.close()
245 245
246 246 if tags:
247 247 repo.vfs.write("localtags", "".join(tags))
248 248 finally:
249 249 ui.progress(_('building'), None)
250 250 release(tr, lock, wlock)
251 251
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """Dump the chunks of a changegroup (bundle1-style) stream.

    With ``all`` set, every delta chunk of the changelog, manifest and
    filelogs is listed with full metadata; otherwise only changelog node
    ids are printed.  ``indent`` prefixes every output line (used by the
    bundle2 dumper to nest changegroup parts).
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # dump every chunk of the current sub-stream; 'chain' tracks the
            # previously seen node so deltachunk() can resolve delta bases
            ui.write("\n%s%s\n" % (indent_string, named))
            chain = None
            # iter(callable, sentinel): deltachunk returns {} at end of stream
            for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']
                ui.write("%s%s %s %s %s %s %s\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))
                chain = node

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        # one filelog sub-stream per file, each announced by its header
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        chain = None
        for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
            node = chunkdata['node']
            ui.write("%s%s\n" % (indent_string, hex(node)))
            chain = node
289 289
def _debugbundle2(ui, gen, all=None, **opts):
    """dump every part of a bundle2 stream"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % repr(gen.params)))
    for part in gen.iterparts():
        ui.write('%s -- %r\n' % (part.type, repr(part.params)))
        if part.type != 'changegroup':
            continue
        # embedded changegroups are expanded with the bundle1 dumper
        cgversion = part.params.get('version', '01')
        unbundler = changegroup.getunbundler(cgversion, part, 'UN')
        _debugchangegroup(ui, unbundler, all=all, indent=4, **opts)
301 301
@command('debugbundle',
        [('a', 'all', None, _('show all details')),
         ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # only report the bundlespec, do not unpack the payload
            ui.write('%s\n' % exchange.getbundlespec(ui, f))
            return

        bundle = exchange.readbundle(ui, f, bundlepath)
        if isinstance(bundle, bundle2.unbundle20):
            return _debugbundle2(ui, bundle, all=all, **opts)
        _debugchangegroup(ui, bundle, all=all, **opts)
319 319
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the two
    working-directory parents and warns about inconsistencies.  Aborts
    if any were found.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        # 'n' (normal) and 'r' (removed) entries must exist in manifest1
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # 'a' (added) entries must NOT already be in manifest1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # 'm' (merged) entries must come from at least one parent
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # BUG FIX: the previous code assigned the message to a local named
        # 'error', shadowing the 'error' module so 'error.Abort' became an
        # attribute lookup on a string and raised AttributeError instead.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
347 347
@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % ui._colormode)
    # dispatch to the style lister or the color lister
    display = _debugdisplaystyle if opts.get('style') else _debugdisplaycolor
    return display(ui)
358 358
def _debugdisplaycolor(ui):
    """print every color/effect name known to the current mode, colorized."""
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._effects.keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # terminfo mode additionally exposes user-defined color.*/terminfo.*
        for key, value in ui.configitems('color'):
            if key.startswith('color.'):
                ui._styles[key] = key[6:]
            elif key.startswith('terminfo.'):
                ui._styles[key] = key[9:]
    ui.write(_('available colors:\n'))
    # names containing '_' (e.g. '_background') sort after the plain ones
    for colorname, label in sorted(ui._styles.items(),
                                   key=lambda item: ('_' in item[0],
                                                     item[0], item[1])):
        ui.write(('%s\n') % colorname, label=label)
376 376
def _debugdisplaystyle(ui):
    """list every configured style label with its effects, colorized."""
    ui.write(_('available style:\n'))
    width = max(len(name) for name in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            # pad so the effect lists line up in a single column
            ui.write(': ' + ' ' * max(0, width - len(label)))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')
388 388
@command('debugcommands', [], _('[COMMAND]'), norepo=True)
def debugcommands(ui, cmd='', *args):
    """list all available commands and options"""
    for name, entry in sorted(commands.table.iteritems()):
        # keep only the primary name, without the '^' priority marker
        primary = name.split('|')[0].strip('^')
        optnames = ', '.join(opt[1] for opt in entry[1])
        ui.write('%s: %s\n' % (primary, optnames))
396 396
@command('debugcomplete',
        [('o', 'options', None, _('show the command options'))],
        _('[-o] CMD'),
        norepo=True)
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # flag completion: global options plus the command's own table
        tables = [commands.globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
            tables.append(entry[1])
        flags = []
        for table in tables:
            for opt in table:
                if "(DEPRECATED)" in opt[3]:
                    continue
                if opt[0]:
                    flags.append('-%s' % opt[0])
                flags.append('--%s' % opt[1])
        ui.write("%s\n" % "\n".join(flags))
        return

    # command-name completion
    possible, unused_allcmds = cmdutil.findpossible(cmd, commands.table)
    if ui.verbose:
        possible = [' '.join(names[0]) for names in possible.values()]
    ui.write("%s\n" % "\n".join(sorted(possible)))
424 424
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    requires, chunks = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, chunks, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requires)))
436 436
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # BUG FIX: a stray duplicate of this statement (using the removed
        # 'scmutil.opener' alias) was left behind; 'scmutil.vfs' is the
        # current name.  audit=False allows opening a revlog anywhere.
        rlog = revlog.revlog(scmutil.vfs(pycompat.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # yield 'n' (node) events for every revision, and 'l' (label)
            # events for the revisions explicitly listed on the command line
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            # emit 'a' (annotation) events on branch changes, then the
            # node and optional tag-label events per revision
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")
499 499
@command('debugdata', commands.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    # with -c/-m/--dir the revlog is implied, so the positional FILE
    # argument actually holds the revision
    if any(opts.get(o) for o in ('changelog', 'manifest', 'dir')):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    rl = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(rl.revision(rl.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
514 514
@command('debugdate',
        [('e', 'extended', None, _('try extended date formats'))],
        _('[-e] DATE [RANGE]'),
        norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    ui.write(("internal: %s %s\n") % parsed)
    ui.write(("standard: %s\n") % util.datestr(parsed))
    if range:
        # show whether the parsed timestamp falls inside RANGE
        matchfn = util.matchdate(range)
        ui.write(("match: %s\n") % matchfn(parsed[0]))
530 530
@command('debugdeltachain',
    commands.debugrevlogopts + commands.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain
    """
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    generaldelta = r.version & revlog.REVLOGGENERALDELTA

    def revinfo(rev):
        # index entry fields used below: e[1] compressed size,
        # e[2] uncompressed size, e[3] delta base rev, e[5]/e[6] parent revs
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # with generaldelta the base may be any earlier revision;
            # classify it relative to the parents / previous rev
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta a delta is always against the previous rev
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        # sum compressed sizes along the whole delta chain
        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain(' rev chain# chainlen prev delta '
             'size rawsize chainsize ratio lindist extradist '
             'extraratio\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        # chains are numbered in order of first appearance of their base
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = r.start(chainbase)
        revstart = r.start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # chain of length 1: this rev is its own base
            prevrev = -1

        chainratio = float(chainsize) / float(uncomp)
        extraratio = float(extradist) / float(chainsize)

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)

    fm.end()
631 631
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = opts.get('nodates')
    datesort = opts.get('datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # each dirstate entry 'ent' is (state, mode, size, mtime)
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            # mtime of -1 means "unknown/needs checking"
            timestr = 'unset '
        elif nodates:
            timestr = 'set '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
        if ent[1] & 0o20000:
            # S_IFLNK bit: the entry is a symlink
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
662 662
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ] + commands.remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
                                      opts.get('branch'))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(localheads, remoteheads, remote=remote):
        # run one discovery exchange against 'remote' and report the
        # common heads plus the subset relationship of the head sets
        if opts.get('old'):
            if localheads:
                raise error.Abort('cannot use localheads with old style '
                                  'discovery')
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
            # reduce the common set to the heads of the connected subgraph
            dag = dagutil.revlogdag(repo.changelog)
            all = dag.ancestorset(dag.internalizeall(common))
            common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    serverlogs = opts.get('serverlog')
    if serverlogs:
        # replay discovery exchanges recorded in server log files;
        # fields are semicolon-separated, operation code in parts[1]
        for filename in serverlogs:
            with open(filename, 'r') as logfile:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
    else:
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)
728 728
@command('debugextensions', commands.formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
    '''show information about active extensions'''
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = [] # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            # annotate the name with the compatibility status
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                 _(' location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _(' tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _(' bug reporting: %s\n'), extbuglink or "")

    fm.end()
773 773
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
    if ui.verbose:
        # show the parsed expression tree before evaluating it
        ui.note(fileset.prettyformat(fileset.parse(expr)), "\n")

    for fname in ctx.getfileset(expr):
        ui.write("%s\n" % fname)
786 786
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    def yesno(flag):
        return 'yes' if flag else 'no'
    # probe with a scratch file that is removed again afterwards
    util.writefile('.debugfsinfo', '')
    ui.write(('exec: %s\n') % yesno(util.checkexec(path)))
    ui.write(('symlink: %s\n') % yesno(util.checklink(path)))
    ui.write(('hardlink: %s\n') % yesno(util.checknlink(path)))
    ui.write(('case-sensitive: %s\n')
             % yesno(util.fscasesensitive('.debugfsinfo')))
    os.unlink('.debugfsinfo')
797 797
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    kwargs = {}
    if common:
        kwargs['common'] = [bin(s) for s in common]
    if head:
        kwargs['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    kwargs['bundlecaps'] = None
    bundle = peer.getbundle('debug', **kwargs)

    # map the user-facing compression name to the on-disk bundle header
    wanted = opts.get('type', 'bzip2').lower()
    header = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}.get(wanted)
    if header not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, header)
831 831
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        includepat = getattr(ignore, 'includepat', None)
        if includepat is not None:
            ui.write("%s\n" % includepat)
        else:
            raise error.Abort(_("no ignore patterns found"))
    else:
        for f in files:
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    # the file itself matches an ignore pattern
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # otherwise, check whether any parent directory is ignored
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % f)
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (f, ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % f)
876 876
@command('debugindex', commands.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    generaldelta = r.version & revlog.REVLOGGENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = ' base'

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        ui.write((" rev offset length " + basehdr + " linkrev"
                 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        ui.write((" rev flag offset length"
                 " size " + basehdr + " link p1 p2"
                 " %s\n") % "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        # with generaldelta, report the delta parent; otherwise the chain base
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], shortfn(node)))
932 932
@command('debugindexdot', commands.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    rl = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for rev in rl:
        p1, p2 = rl.parents(rl.node(rev))
        # one edge per parent; the null second parent is omitted
        ui.write("\t%d -> %d\n" % (rl.rev(p1), rev))
        if p2 != nullid:
            ui.write("\t%d -> %d\n" % (rl.rev(p2), rev))
    ui.write("}\n")
946 946
@command('debuginstall', [] + commands.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''

    def writetemp(contents):
        # write 'contents' to a fresh temp file and return its path
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, pycompat.sysstr("wb"))
        f.write(contents)
        f.close()
        return name

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        encoding.fromlocal("test")
    except error.Abort as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    err = None
    try:
        from . import (
            base85,
            bdiff,
            mpatch,
            osutil,
        )
        dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
    except Exception as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = inst
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editor)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e
        problems += 1

    fm.condwrite(username, 'username',  _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1112 1112
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    # Ask the peer about all ids at once, then render one digit per id.
    bits = []
    for known in peer.known([bin(hexnode) for hexnode in ids]):
        bits.append(known and "1" or "0")
    ui.write("%s\n" % "".join(bits))
1125 1125
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Thin alias kept only for old shell-completion scripts; the real
    # implementation is debugnamecomplete.
    commands.debugnamecomplete(ui, repo, *args)
1130 1130
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Returns 0 if no locks are held.

    """

    if opts.get('force_lock'):
        repo.svfs.unlink('lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink('wlock')
    # Fix: the condition previously tested 'force_lock' twice, so running
    # with only --force-wlock fell through into the reporting code below
    # instead of returning after freeing the lock.
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # Report the state of one lock file; returns 1 if held, 0 if free.
        # Taking and immediately releasing the lock also reaps stale
        # locks, which makes the report more accurate.
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                stat = vfs.lstat(name)
                age = now - stat.st_mtime
                user = util.username(stat.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                # ENOENT simply means no one holds the lock
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1202 1202
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    # Render a hash, mapping the all-zero (null) hex id to the word 'null'.
    def _hashornull(h):
        if h == nullhex:
            return 'null'
        else:
            return h

    # Pretty-print the records read for the requested merge-state version.
    # Record types handled below: 'L' local node, 'O' other node, 'm' merge
    # driver, 'F'/'D'/'C' per-file records, 'f' per-file extras, 'l' labels;
    # anything else is dumped raw.
    def printrecords(version):
        ui.write(('* version %s records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # file records are NUL-separated fields; v2 adds two extra
                # trailing fields (other node and flags)
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write((' ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write((' other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # extras are stored as alternating key/value fields
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # up to three labels: local, other, and optionally base
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write((' local: %s\n' % labels[0]))
                ui.write((' other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write((' base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    # Known record types sort first, in 'order'; unknown ones sort after,
    # by record payload.
    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
    if ui.verbose:
        printrecords(2)
1301 1301
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # The 'branches' namespace is handled separately below so that only
    # open branches are offered (matching this command's historical
    # behavior); every other namespace contributes all of its names.
    for nsname, ns in repo.names.iteritems():
        if nsname != 'branches':
            names.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            names.add(tag)

    prefixes = args or ['']
    completions = set()
    for prefix in prefixes:
        for name in names:
            if name.startswith(prefix):
                completions.add(name)
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')
1321 1321
@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + commands.commitopts2 + commands.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    # Convert a full-length hex node string into its binary form, aborting
    # on anything that is not exactly a full node id.
    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # Deletion mode: --delete takes obsstore indices, validated as ints,
    # and must not run inside an open transaction.
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # Creation mode: record one marker precursor -> successors under
        # the repo lock and a dedicated transaction.
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = util.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    # parents can only be read from a changeset we have
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') % exc)
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Listing mode: optionally restrict to markers relevant to --rev.
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsolete.getmarkers(repo, nodes=nodes))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsolete.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            # indices are positions in the full marker list, so we must
            # iterate everything even when displaying a subset
            markerstoiter = obsolete.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1432 1432
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    # Return (files, dirs) completion candidates for 'path', keeping only
    # dirstate entries whose state character is in 'acceptable'.
    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # paths outside the repository cannot complete to anything
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # dirstate stores paths with '/' separators; translate on platforms
        # where the OS separator differs
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # without --full, stop at the next path separator and offer
                # the directory prefix instead
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # build the set of acceptable dirstate state characters from the
    # filtering options; empty string means "no filter" (handled below)
    acceptable = ''
    if opts['normal']:
        acceptable += 'nm'
    if opts['added']:
        acceptable += 'a'
    if opts['removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
1497 1497
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # listing mode: dump every key/value pair in the namespace
        for key, value in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (key.encode('string-escape'),
                                   value.encode('string-escape')))
        return
    # update mode: conditionally move key from old to new
    key, old, new = keyinfo
    success = target.pushkey(namespace, key, old, new)
    ui.status(str(success) + '\n')
    return not success
1518 1518
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    # Compare the parent vectors (pvecs) of two revisions and print their
    # depths, distance, and relationship symbol (=, >, <, or |).
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    # NOTE(review): if none of the four comparisons above matched, 'rel'
    # would be unbound and the final write would raise NameError --
    # presumably the cases are exhaustive for pvecs; confirm against the
    # pvec module.
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
1539 1539
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        if opts.get('minimal'):
            # Only touch the entries where dirstate and the target manifest
            # disagree (see the docstring for the exact rules).
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            notadded = set()
            for f in dirstatefiles - manifestfiles:
                if dirstate[f] != 'a':
                    notadded.add(f)
            changedfiles = manifestonly | notadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1577 1577
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # The actual rebuild logic lives in the repair module.
    repair.rebuildfncache(ui, repo)
1582 1582
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, (file1,) + pats, opts)
    for abspath in ctx.walk(matcher):
        fctx = ctx[abspath]
        # filelog().renamed() returns (source path, source node) or False
        renamed = fctx.filelog().renamed(fctx.filenode())
        relpath = matcher.rel(abspath)
        if not renamed:
            ui.write(_("%s not renamed\n") % relpath)
        else:
            ui.write(_("%s renamed from %s:%s\n")
                     % (relpath, renamed[0], hex(renamed[1])))
1599 1599
@command('debugrevlog', commands.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    # --dump: print one raw table row per revision and exit
    if opts.get("dump"):
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                 " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # a revision with no delta parent stores itself in full
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # maintain the current set of head revisions incrementally
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # decode the revlog version word into a format number and flag names
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.REVLOGNGINLINEDATA:
        flags.append('inline')
    if v & revlog.REVLOGGENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    nummerges = 0
    numfull = 0
    numprev = 0
    nump1 = 0
    nump2 = 0
    numother = 0
    nump1prev = 0
    nump2prev = 0
    chainlengths = []

    # each size accumulator is [min, max, total]; total is later replaced
    # by the average
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    # fold one observed size into a [min, max, total] accumulator
    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # full snapshot revision
            chainlengths.append(0)
            numfull += 1
            addsize(size, fullsize)
        else:
            # delta revision: classify its base (previous rev, p1, p2, other)
            chainlengths.append(chainlengths[delta] + 1)
            addsize(size, deltasize)
            if delta == rev - 1:
                numprev += 1
                if delta == p1:
                    nump1prev += 1
                elif delta == p2:
                    nump2prev += 1
            elif delta == p1:
                nump1 += 1
            elif delta == p2:
                nump2 += 1
            elif delta != nullrev:
                numother += 1

        # Obtain data on the raw chunks in the revlog.
        chunk = r._chunkraw(rev, rev)[1]
        if chunk:
            chunktype = chunk[0]
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # NOTE: these are Python 2 integer divisions, so the averages truncate
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    deltatotal = deltasize[2]
    if numrevs - numfull > 0:
        deltasize[2] /= numrevs - numfull
    totalsize = fulltotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # format-string templates; the %%%dd placeholder is widened to the
    # number of digits of the largest value being printed
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    # render a value together with its percentage of 'total'
    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    # label a chunk-type byte: printable letters are shown alongside their
    # hex value, anything else as hex only
    def fmtchunktype(chunktype):
        if chunktype == 'empty':
            return ' %s : ' % chunktype
        elif chunktype in string.ascii_letters:
            return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return ' 0x%s : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
1819 1819
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    # The parsing pipeline: each stage transforms the tree produced by the
    # previous one; --no-optimized drops the last stage.
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # which stage trees to print: 'showalways' unconditionally,
    # 'showchanged' only when the tree differs from the last one printed
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
    if opts['optimize']:
        showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=repo.__contains__)
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # evaluate both the analyzed and the optimized trees and show a
        # unified-style diff of their result sets
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if ui.verbose:
            ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
            ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%s\n' % c, label='diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%s\n' % c, label='diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %s\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if ui.verbose:
        ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
    for c in revs:
        ui.write("%s\n" % c)
1912 1912
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """

    # Resolve both revisions up front; a missing second revision defaults
    # to the null changeset.
    parent1 = scmutil.revsingle(repo, rev1).node()
    parent2 = scmutil.revsingle(repo, rev2, 'null').node()

    wlock = repo.wlock()
    try:
        repo.setparents(parent1, parent2)
    finally:
        wlock.release()
1930 1930
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # Dump the subrepo state recorded in the requested changeset: for each
    # path, print its source and pinned revision.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
1941 1941
@command('debugsuccessorssets',
    [],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # Shared cache passed to every successorssets() call so intermediate
    # computations are reused across revisions.
    cache = {}
    # Full hashes in debug mode, the usual short forms otherwise.
    if ui.debug():
        ctx2str = lambda ctx: ctx.hex()
        node2str = hex
    else:
        ctx2str = str
        node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n' % ctx2str(ctx))
        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
            # An empty set (pruned changeset) still produces a blank line.
            if succsset:
                ui.write(' ')
                ui.write(' '.join(node2str(n) for n in succsset))
            ui.write('\n')
1995 1995
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    # A repository is only required when --rev is used.
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts['rev'])

    # Parse -D KEY=VALUE definitions into extra template keywords. A missing
    # '=' makes the two-way unpack raise ValueError, same as an empty key.
    props = {}
    for defn in opts['define']:
        try:
            key, val = [part.strip() for part in defn.split('=', 1)]
            if not key:
                raise ValueError
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % defn)
        props[key] = val

    # With --verbose, dump the parsed tree and, when alias expansion changed
    # it, the expanded tree as well.
    if ui.verbose:
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    mapfile = None
    if revs is None:
        # Generic template: render once with only the -D keywords.
        k = 'debugtemplate'
        t = formatter.maketemplater(ui, k, tmpl)
        ui.write(templater.stringify(t(k, **props)))
    else:
        # Log template: render against each requested changeset.
        displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
                                                mapfile, buffered=False)
        for r in revs:
            displayer.show(repo[r], **props)
        displayer.close()
2046 2046
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.
    """
    # All analysis and the actual upgrade work live in the repair module;
    # this command is a thin CLI entry point around it.
    return repair.upgraderepo(ui, repo, run=run, optimize=optimize)
2071 2071
@command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    # Build a matcher for the working directory and collect every file it
    # selects; nothing matched means nothing to print.
    matcher = scmutil.match(repo[None], pats, opts)
    paths = list(repo.walk(matcher))
    if not paths:
        return
    # Honour ui.slash on platforms whose native separator is not '/'.
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        display = util.normpath
    else:
        display = lambda fn: fn
    # Size the columns to the longest absolute and relative paths.
    fmt = 'f %%-%ds %%-%ds %%s' % (
        max(len(abspath) for abspath in paths),
        max(len(matcher.rel(abspath)) for abspath in paths))
    for abspath in paths:
        exact = 'exact' if matcher.exact(abspath) else ''
        line = fmt % (abspath, display(matcher.rel(abspath)), exact)
        ui.write("%s\n" % line.rstrip())
2089 2089
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
    ] + commands.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    # Connect to the peer first, then strip the generic remote options so
    # that only the debug-specific flags are forwarded over the wire.
    repo = hg.peer(ui, opts, repopath)
    for remoteopt in commands.remoteopts:
        del opts[remoteopt[1]]
    # Keep only the options the user actually set.
    args = dict((k, v) for k, v in opts.iteritems() if v)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now