##// END OF EJS Templates
getchangegroup: take an 'outgoing' object as argument (API)...
Pierre-Yves David -
r29807:d4e02634 default
parent child Browse files
Show More
@@ -1,1048 +1,1047 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import tempfile
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 branchmap,
25 25 dagutil,
26 26 discovery,
27 27 error,
28 28 mdiff,
29 29 phases,
30 30 util,
31 31 )
32 32
# struct formats for the per-revision delta headers, one per changegroup
# version. Field layout (see the _deltaheader / builddeltaheader methods
# below):
#   cg1: node, p1, p2, linknode          (delta base implicit: prev or p1)
#   cg2: node, p1, p2, deltabase, linknode
#   cg3: node, p1, p2, deltabase, linknode, flags (big-endian 16-bit)
_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36 36
def readexactly(stream, n):
    '''Read exactly n bytes from stream.read and abort on a short read.'''
    data = stream.read(n)
    if len(data) < n:
        # A short read means the peer hung up or the bundle is truncated.
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(data), n))
    return data
45 45
def getchunk(stream):
    """return the next chunk from stream as a string"""
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length > 4:
        # On-wire length counts the 4-byte header itself.
        return readexactly(stream, length - 4)
    if length:
        # 1..4 (or negative) can never describe a valid chunk.
        raise error.Abort(_("invalid chunk length %d") % length)
    # A zero length is the end-of-group marker.
    return ""
55 55
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # The transmitted length includes the 4 header bytes themselves.
    return struct.pack(">l", 4 + length)
59 59
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # A zero length on the wire terminates a chunk group (see getchunk).
    return struct.pack(">l", 0)
63 63
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one

    Each result is an integer as returned by cg1unpacker.apply():
    0 for "nothing changed or no source", 1 for an unchanged head
    count, 1+n for n added heads, -1-n for n removed heads. The
    combined value uses the same encoding.
    """
    changedheads = 0
    for ret in results:
        # If any changegroup result is 0, the combined result is 0.
        # Return immediately: falling through to the head-count
        # arithmetic below could overwrite the 0 with an earlier
        # accumulated head delta (e.g. [3, 0] must yield 0, not 3).
        if ret == 0:
            return 0
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        return 1 + changedheads
    if changedheads < 0:
        return -1 + changedheads
    return 1
82 82
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        # Open the destination: a caller-supplied path (through vfs when
        # given) or a fresh temporary file.
        if not filename:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            fh = open(filename, "wb")
        cleanup = filename
        for chunk in chunks:
            fh.write(chunk)
        # All data made it to disk; disarm the error cleanup below.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            # Writing failed part-way: do not leave a truncated file.
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
114 114
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    # struct format of one delta header (overridden by cg2/cg3 subclasses)
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        # 'alg' is the two-letter stream compression type ('UN', 'BZ', 'GZ',
        # ...); the raw stream 'fh' is wrapped in the matching decompressor.
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if not alg in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                             % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        self.extras = extras or {}
        # Invoked from _chunklength for progress reporting; set by apply().
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read a chunk header and return the payload length (0 at end)."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 has no explicit delta base: it is the previous node in the
        # stream, or p1 for the first delta of a group.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk; return {} at the end of the group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                # re-emit large chunks in 1MB slices
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
        yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        """Consume the manifest group and add it to the repo (cg3 extends
        this to also consume directory-manifest groups)."""
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level value (if they exist) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = cl.heads()

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                class prog(object):
                    # Stateful per-chunk progress callback (see
                    # _chunklength, which calls self.callback).
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                efiles = set()
                def onchangelog(cl, node):
                    efiles.update(cl.readfiles(node))

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                # rebind: from here on efiles is the count of touched files
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)
                self.callback = None

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = repo.changelog.read(
                            repo.changelog.node(cset))[0]
                        mfest = repo.manifest.readdelta(mfnode)
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, efiles, needfiles)
                revisions += newrevs
                files += newfiles

                # dh: change in head count, folded into the return value
                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers can not push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all change in
                    # the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefor `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alter behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but
                        # coming call to `destroyed` will repair it.
                        # In other case we can safely update cache on
                        # disk.
                        repo.ui.debug('updating the branch cache\n')
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        # (0 is reserved for "nothing changed / no source"; the head delta
        # is shifted away from it — combineresults decodes this encoding)
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
441 441
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # The delta base is carried explicitly in the cg2 header, so
        # unlike cg1 it never has to be inferred from prevnode/p1.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
457 457
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 carries deltabase and flags explicitly; prevnode is unused.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # Consume the flat-manifest group like cg1/cg2 ...
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        # ... then the directory-manifest groups cg3 appends. iter()'s
        # sentinel stops at the empty header dict ending the section.
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifest.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
484 484
class headerlessfixup(object):
    """File-like shim that replays the bytes ``h`` before reading ``fh``.

    read(n) serves data from the buffered prefix first and tops up from
    the underlying stream (via readexactly) once the buffer runs dry.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if not self._h:
            # Buffer exhausted: read straight from the wrapped stream.
            return readexactly(self._fh, n)
        buffered, self._h = self._h[:n], self._h[n:]
        if len(buffered) < n:
            buffered += readexactly(self._fh, n - len(buffered))
        return buffered
496 496
class cg1packer(object):
    """Packer producing version '01' changegroup streams.

    generate() is the entry point; it yields the raw chunk strings for
    the changelog, manifest, and filelog groups in wire order.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        # None means 'auto' (decide per-revlog in _sortgroup).
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        """Return the chunk terminating a group."""
        return closechunk()

    def fileheader(self, fname):
        """Return the header chunk announcing filelog 'fname'."""
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        """Return the subset of 'missing' whose linkrev is not common."""
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        # cg1/cg2 only support the root (flat) manifest.
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifest,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg1/cg2 have no terminator for the manifest section.
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        """Yield the manifest section chunks, visiting directories
        shallowest-first (lexicographic min over remaining dirs)."""
        repo = self._repo
        dirlog = repo.manifest.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = dirlog(dir).readshallowfast(x)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield the filelog section chunks, one group per changed file."""
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # cg1's delta base is always the previous rev in the stream.
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the wire chunks (header, meta, delta) for one revision."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                # ship the tombstone as a full-replacement "delta"
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            # no usable base: send the full text with a trivial diff header
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
807 807
class cg2packer(cg1packer):
    """Packer producing version '02' streams (explicit delta bases)."""
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # Reuse the stored delta base only when the receiver is certain
        # to have it (a parent, or the previous rev in the stream); a
        # nullrev base would mean shipping a full revision, so fall back
        # to prev in every other case.
        dp = revlog.deltaparent(rev)
        if dp != nullrev and dp in (p1, p2, prev):
            return dp
        return prev

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
831 831
class cg3packer(cg2packer):
    """Packer producing version '03' streams (tree manifests, revlog flags)."""
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        # A directory manifest group is announced like a filelog group,
        # with the directory name as its header.
        if dir:
            yield self.fileheader(dir)
        log = self._repo.manifest.dirlog(dir)
        for chunk in self.group(mfnodes, log, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg3 ends the manifest section with an explicit close chunk.
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # cg3 is the first version to put revlog flags on the wire.
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode,
                           linknode, flags)
849 849
# Maps changegroup version string -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
              }
856 856
def allsupportedversions(ui):
    """Return the set of changegroup versions this client understands."""
    versions = set(_packermap)
    # '03' is experimental: only advertise it when explicitly enabled
    # (or implied by the treemanifest experiment).
    if not (ui.configbool('experimental', 'changegroup3') or
            ui.configbool('experimental', 'treemanifest')):
        versions.discard('03')
    return versions
864 864
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        # A treemanifest repo must be able to receive cg3 regardless of
        # the experimental config knobs.
        versions |= {'03'}
    return versions
871 871
872 872 # Changegroup versions that can be created from the repo
873 873 def supportedoutgoingversions(repo):
874 874 versions = allsupportedversions(repo.ui)
875 875 if 'treemanifest' in repo.requirements:
876 876 # Versions 01 and 02 support only flat manifests and it's just too
877 877 # expensive to convert between the flat manifest and tree manifest on
878 878 # the fly. Since tree manifests are hashed differently, all of history
879 879 # would have to be converted. Instead, we simply don't even pretend to
880 880 # support versions 01 and 02.
881 881 versions.discard('01')
882 882 versions.discard('02')
883 883 versions.add('03')
884 884 return versions
885 885
886 886 def safeversion(repo):
887 887 # Finds the smallest version that it's safe to assume clients of the repo
888 888 # will support. For example, all hg versions that support generaldelta also
889 889 # support changegroup 02.
890 890 versions = supportedoutgoingversions(repo)
891 891 if 'generaldelta' in repo.requirements:
892 892 versions.discard('01')
893 893 assert versions
894 894 return min(versions)
895 895
896 896 def getbundler(version, repo, bundlecaps=None):
897 897 assert version in supportedoutgoingversions(repo)
898 898 return _packermap[version][0](repo, bundlecaps)
899 899
900 900 def getunbundler(version, fh, alg, extras=None):
901 901 return _packermap[version][1](fh, alg, extras=extras)
902 902
903 903 def _changegroupinfo(repo, nodes, source):
904 904 if repo.ui.verbose or source == 'bundle':
905 905 repo.ui.status(_("%d changesets found\n") % len(nodes))
906 906 if repo.ui.debugflag:
907 907 repo.ui.debug("list of changesets:\n")
908 908 for node in nodes:
909 909 repo.ui.debug("%s\n" % hex(node))
910 910
911 911 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
912 912 repo = repo.unfiltered()
913 913 commonrevs = outgoing.common
914 914 csets = outgoing.missing
915 915 heads = outgoing.missingheads
916 916 # We go through the fast path if we get told to, or if all (unfiltered
917 917 # heads have been requested (since we then know there all linkrevs will
918 918 # be pulled by the client).
919 919 heads.sort()
920 920 fastpathlinkrev = fastpath or (
921 921 repo.filtername is None and heads == sorted(repo.heads()))
922 922
923 923 repo.hook('preoutgoing', throw=True, source=source)
924 924 _changegroupinfo(repo, csets, source)
925 925 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
926 926
927 927 def getsubset(repo, outgoing, bundler, source, fastpath=False):
928 928 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
929 929 return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
930 930 {'clcount': len(outgoing.missing)})
931 931
932 932 def changegroupsubset(repo, roots, heads, source, version='01'):
933 933 """Compute a changegroup consisting of all the nodes that are
934 934 descendants of any of the roots and ancestors of any of the heads.
935 935 Return a chunkbuffer object whose read() method will return
936 936 successive changegroup chunks.
937 937
938 938 It is fairly complex as determining which filenodes and which
939 939 manifest nodes need to be included for the changeset to be complete
940 940 is non-trivial.
941 941
942 942 Another wrinkle is doing the reverse, figuring out which changeset in
943 943 the changegroup a particular filenode or manifestnode belongs to.
944 944 """
945 945 outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
946 946 bundler = getbundler(version, repo)
947 947 return getsubset(repo, outgoing, bundler, source)
948 948
949 949 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
950 950 version='01'):
951 951 """Like getbundle, but taking a discovery.outgoing as an argument.
952 952
953 953 This is only implemented for local repos and reuses potentially
954 954 precomputed sets in outgoing. Returns a raw changegroup generator."""
955 955 if not outgoing.missing:
956 956 return None
957 957 bundler = getbundler(version, repo, bundlecaps)
958 958 return getsubsetraw(repo, outgoing, bundler, source)
959 959
960 960 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
961 961 version='01'):
962 962 """Like getbundle, but taking a discovery.outgoing as an argument.
963 963
964 964 This is only implemented for local repos and reuses potentially
965 965 precomputed sets in outgoing."""
966 966 if not outgoing.missing:
967 967 return None
968 968 bundler = getbundler(version, repo, bundlecaps)
969 969 return getsubset(repo, outgoing, bundler, source)
970 970
971 971 def computeoutgoing(repo, heads, common):
972 972 """Computes which revs are outgoing given a set of common
973 973 and a set of heads.
974 974
975 975 This is a separate function so extensions can have access to
976 976 the logic.
977 977
978 978 Returns a discovery.outgoing object.
979 979 """
980 980 cl = repo.changelog
981 981 if common:
982 982 hasnode = cl.hasnode
983 983 common = [n for n in common if hasnode(n)]
984 984 else:
985 985 common = [nullid]
986 986 if not heads:
987 987 heads = cl.heads()
988 988 return discovery.outgoing(repo, common, heads)
989 989
990 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
990 def getchangegroup(repo, source, outgoing, bundlecaps=None,
991 991 version='01'):
992 992 """Like changegroupsubset, but returns the set difference between the
993 993 ancestors of heads and the ancestors common.
994 994
995 995 If heads is None, use the local heads. If common is None, use [nullid].
996 996
997 997 The nodes in common might not all be known locally due to the way the
998 998 current discovery protocol works.
999 999 """
1000 outgoing = computeoutgoing(repo, heads, common)
1001 1000 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
1002 1001 version=version)
1003 1002
1004 1003 def changegroup(repo, basenodes, source):
1005 1004 # to avoid a race we use changegroupsubset() (issue1320)
1006 1005 return changegroupsubset(repo, basenodes, repo.heads(), source)
1007 1006
1008 1007 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1009 1008 revisions = 0
1010 1009 files = 0
1011 1010 for chunkdata in iter(source.filelogheader, {}):
1012 1011 files += 1
1013 1012 f = chunkdata["filename"]
1014 1013 repo.ui.debug("adding %s revisions\n" % f)
1015 1014 repo.ui.progress(_('files'), files, unit=_('files'),
1016 1015 total=expectedfiles)
1017 1016 fl = repo.file(f)
1018 1017 o = len(fl)
1019 1018 try:
1020 1019 if not fl.addgroup(source, revmap, trp):
1021 1020 raise error.Abort(_("received file revlog group is empty"))
1022 1021 except error.CensoredBaseError as e:
1023 1022 raise error.Abort(_("received delta base is censored: %s") % e)
1024 1023 revisions += len(fl) - o
1025 1024 if f in needfiles:
1026 1025 needs = needfiles[f]
1027 1026 for new in xrange(o, len(fl)):
1028 1027 n = fl.node(new)
1029 1028 if n in needs:
1030 1029 needs.remove(n)
1031 1030 else:
1032 1031 raise error.Abort(
1033 1032 _("received spurious file revlog entry"))
1034 1033 if not needs:
1035 1034 del needfiles[f]
1036 1035 repo.ui.progress(_('files'), None)
1037 1036
1038 1037 for f, needs in needfiles.iteritems():
1039 1038 fl = repo.file(f)
1040 1039 for n in needs:
1041 1040 try:
1042 1041 fl.rev(n)
1043 1042 except error.LookupError:
1044 1043 raise error.Abort(
1045 1044 _('missing file data for %s:%s - run hg verify') %
1046 1045 (f, hex(n)))
1047 1046
1048 1047 return revisions, files
@@ -1,7282 +1,7283 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import difflib
11 11 import errno
12 12 import operator
13 13 import os
14 14 import random
15 15 import re
16 16 import shlex
17 17 import socket
18 18 import sys
19 19 import tempfile
20 20 import time
21 21
22 22 from .i18n import _
23 23 from .node import (
24 24 bin,
25 25 hex,
26 26 nullhex,
27 27 nullid,
28 28 nullrev,
29 29 short,
30 30 )
31 31 from . import (
32 32 archival,
33 33 bookmarks,
34 34 bundle2,
35 35 changegroup,
36 36 cmdutil,
37 37 commandserver,
38 38 context,
39 39 copies,
40 40 dagparser,
41 41 dagutil,
42 42 destutil,
43 43 discovery,
44 44 encoding,
45 45 error,
46 46 exchange,
47 47 extensions,
48 48 fileset,
49 49 formatter,
50 50 graphmod,
51 51 hbisect,
52 52 help,
53 53 hg,
54 54 hgweb,
55 55 localrepo,
56 56 lock as lockmod,
57 57 merge as mergemod,
58 58 minirst,
59 59 obsolete,
60 60 patch,
61 61 phases,
62 62 policy,
63 63 pvec,
64 64 repair,
65 65 revlog,
66 66 revset,
67 67 scmutil,
68 68 setdiscovery,
69 69 simplemerge,
70 70 sshserver,
71 71 streamclone,
72 72 templatekw,
73 73 templater,
74 74 treediscovery,
75 75 ui as uimod,
76 76 util,
77 77 )
78 78
79 79 release = lockmod.release
80 80
81 81 table = {}
82 82
83 83 command = cmdutil.command(table)
84 84
85 85 # label constants
86 86 # until 3.5, bookmarks.current was the advertised name, not
87 87 # bookmarks.active, so we must use both to avoid breaking old
88 88 # custom styles
89 89 activebookmarklabel = 'bookmarks.active bookmarks.current'
90 90
91 91 # common command options
92 92
93 93 globalopts = [
94 94 ('R', 'repository', '',
95 95 _('repository root directory or name of overlay bundle file'),
96 96 _('REPO')),
97 97 ('', 'cwd', '',
98 98 _('change working directory'), _('DIR')),
99 99 ('y', 'noninteractive', None,
100 100 _('do not prompt, automatically pick the first choice for all prompts')),
101 101 ('q', 'quiet', None, _('suppress output')),
102 102 ('v', 'verbose', None, _('enable additional output')),
103 103 ('', 'config', [],
104 104 _('set/override config option (use \'section.name=value\')'),
105 105 _('CONFIG')),
106 106 ('', 'debug', None, _('enable debugging output')),
107 107 ('', 'debugger', None, _('start debugger')),
108 108 ('', 'encoding', encoding.encoding, _('set the charset encoding'),
109 109 _('ENCODE')),
110 110 ('', 'encodingmode', encoding.encodingmode,
111 111 _('set the charset encoding mode'), _('MODE')),
112 112 ('', 'traceback', None, _('always print a traceback on exception')),
113 113 ('', 'time', None, _('time how long the command takes')),
114 114 ('', 'profile', None, _('print command execution profile')),
115 115 ('', 'version', None, _('output version information and exit')),
116 116 ('h', 'help', None, _('display help and exit')),
117 117 ('', 'hidden', False, _('consider hidden changesets')),
118 118 ]
119 119
120 120 dryrunopts = [('n', 'dry-run', None,
121 121 _('do not perform actions, just print output'))]
122 122
123 123 remoteopts = [
124 124 ('e', 'ssh', '',
125 125 _('specify ssh command to use'), _('CMD')),
126 126 ('', 'remotecmd', '',
127 127 _('specify hg command to run on the remote side'), _('CMD')),
128 128 ('', 'insecure', None,
129 129 _('do not verify server certificate (ignoring web.cacerts config)')),
130 130 ]
131 131
132 132 walkopts = [
133 133 ('I', 'include', [],
134 134 _('include names matching the given patterns'), _('PATTERN')),
135 135 ('X', 'exclude', [],
136 136 _('exclude names matching the given patterns'), _('PATTERN')),
137 137 ]
138 138
139 139 commitopts = [
140 140 ('m', 'message', '',
141 141 _('use text as commit message'), _('TEXT')),
142 142 ('l', 'logfile', '',
143 143 _('read commit message from file'), _('FILE')),
144 144 ]
145 145
146 146 commitopts2 = [
147 147 ('d', 'date', '',
148 148 _('record the specified date as commit date'), _('DATE')),
149 149 ('u', 'user', '',
150 150 _('record the specified user as committer'), _('USER')),
151 151 ]
152 152
153 153 # hidden for now
154 154 formatteropts = [
155 155 ('T', 'template', '',
156 156 _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
157 157 ]
158 158
159 159 templateopts = [
160 160 ('', 'style', '',
161 161 _('display using template map file (DEPRECATED)'), _('STYLE')),
162 162 ('T', 'template', '',
163 163 _('display with template'), _('TEMPLATE')),
164 164 ]
165 165
166 166 logopts = [
167 167 ('p', 'patch', None, _('show patch')),
168 168 ('g', 'git', None, _('use git extended diff format')),
169 169 ('l', 'limit', '',
170 170 _('limit number of changes displayed'), _('NUM')),
171 171 ('M', 'no-merges', None, _('do not show merges')),
172 172 ('', 'stat', None, _('output diffstat-style summary of changes')),
173 173 ('G', 'graph', None, _("show the revision DAG")),
174 174 ] + templateopts
175 175
176 176 diffopts = [
177 177 ('a', 'text', None, _('treat all files as text')),
178 178 ('g', 'git', None, _('use git extended diff format')),
179 179 ('', 'nodates', None, _('omit dates from diff headers'))
180 180 ]
181 181
182 182 diffwsopts = [
183 183 ('w', 'ignore-all-space', None,
184 184 _('ignore white space when comparing lines')),
185 185 ('b', 'ignore-space-change', None,
186 186 _('ignore changes in the amount of white space')),
187 187 ('B', 'ignore-blank-lines', None,
188 188 _('ignore changes whose lines are all blank')),
189 189 ]
190 190
191 191 diffopts2 = [
192 192 ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
193 193 ('p', 'show-function', None, _('show which function each change is in')),
194 194 ('', 'reverse', None, _('produce a diff that undoes the changes')),
195 195 ] + diffwsopts + [
196 196 ('U', 'unified', '',
197 197 _('number of lines of context to show'), _('NUM')),
198 198 ('', 'stat', None, _('output diffstat-style summary of changes')),
199 199 ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
200 200 ]
201 201
202 202 mergetoolopts = [
203 203 ('t', 'tool', '', _('specify merge tool')),
204 204 ]
205 205
206 206 similarityopts = [
207 207 ('s', 'similarity', '',
208 208 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
209 209 ]
210 210
211 211 subrepoopts = [
212 212 ('S', 'subrepos', None,
213 213 _('recurse into subrepositories'))
214 214 ]
215 215
216 216 debugrevlogopts = [
217 217 ('c', 'changelog', False, _('open changelog')),
218 218 ('m', 'manifest', False, _('open manifest')),
219 219 ('', 'dir', '', _('open directory manifest')),
220 220 ]
221 221
222 222 # Commands start here, listed alphabetically
223 223
224 224 @command('^add',
225 225 walkopts + subrepoopts + dryrunopts,
226 226 _('[OPTION]... [FILE]...'),
227 227 inferrepo=True)
228 228 def add(ui, repo, *pats, **opts):
229 229 """add the specified files on the next commit
230 230
231 231 Schedule files to be version controlled and added to the
232 232 repository.
233 233
234 234 The files will be added to the repository at the next commit. To
235 235 undo an add before that, see :hg:`forget`.
236 236
237 237 If no names are given, add all files to the repository (except
238 238 files matching ``.hgignore``).
239 239
240 240 .. container:: verbose
241 241
242 242 Examples:
243 243
244 244 - New (unknown) files are added
245 245 automatically by :hg:`add`::
246 246
247 247 $ ls
248 248 foo.c
249 249 $ hg status
250 250 ? foo.c
251 251 $ hg add
252 252 adding foo.c
253 253 $ hg status
254 254 A foo.c
255 255
256 256 - Specific files to be added can be specified::
257 257
258 258 $ ls
259 259 bar.c foo.c
260 260 $ hg status
261 261 ? bar.c
262 262 ? foo.c
263 263 $ hg add bar.c
264 264 $ hg status
265 265 A bar.c
266 266 ? foo.c
267 267
268 268 Returns 0 if all files are successfully added.
269 269 """
270 270
271 271 m = scmutil.match(repo[None], pats, opts)
272 272 rejected = cmdutil.add(ui, repo, m, "", False, **opts)
273 273 return rejected and 1 or 0
274 274
275 275 @command('addremove',
276 276 similarityopts + subrepoopts + walkopts + dryrunopts,
277 277 _('[OPTION]... [FILE]...'),
278 278 inferrepo=True)
279 279 def addremove(ui, repo, *pats, **opts):
280 280 """add all new files, delete all missing files
281 281
282 282 Add all new files and remove all missing files from the
283 283 repository.
284 284
285 285 Unless names are given, new files are ignored if they match any of
286 286 the patterns in ``.hgignore``. As with add, these changes take
287 287 effect at the next commit.
288 288
289 289 Use the -s/--similarity option to detect renamed files. This
290 290 option takes a percentage between 0 (disabled) and 100 (files must
291 291 be identical) as its parameter. With a parameter greater than 0,
292 292 this compares every removed file with every added file and records
293 293 those similar enough as renames. Detecting renamed files this way
294 294 can be expensive. After using this option, :hg:`status -C` can be
295 295 used to check which files were identified as moved or renamed. If
296 296 not specified, -s/--similarity defaults to 100 and only renames of
297 297 identical files are detected.
298 298
299 299 .. container:: verbose
300 300
301 301 Examples:
302 302
303 303 - A number of files (bar.c and foo.c) are new,
304 304 while foobar.c has been removed (without using :hg:`remove`)
305 305 from the repository::
306 306
307 307 $ ls
308 308 bar.c foo.c
309 309 $ hg status
310 310 ! foobar.c
311 311 ? bar.c
312 312 ? foo.c
313 313 $ hg addremove
314 314 adding bar.c
315 315 adding foo.c
316 316 removing foobar.c
317 317 $ hg status
318 318 A bar.c
319 319 A foo.c
320 320 R foobar.c
321 321
322 322 - A file foobar.c was moved to foo.c without using :hg:`rename`.
323 323 Afterwards, it was edited slightly::
324 324
325 325 $ ls
326 326 foo.c
327 327 $ hg status
328 328 ! foobar.c
329 329 ? foo.c
330 330 $ hg addremove --similarity 90
331 331 removing foobar.c
332 332 adding foo.c
333 333 recording removal of foobar.c as rename to foo.c (94% similar)
334 334 $ hg status -C
335 335 A foo.c
336 336 foobar.c
337 337 R foobar.c
338 338
339 339 Returns 0 if all files are successfully added.
340 340 """
341 341 try:
342 342 sim = float(opts.get('similarity') or 100)
343 343 except ValueError:
344 344 raise error.Abort(_('similarity must be a number'))
345 345 if sim < 0 or sim > 100:
346 346 raise error.Abort(_('similarity must be between 0 and 100'))
347 347 matcher = scmutil.match(repo[None], pats, opts)
348 348 return scmutil.addremove(repo, matcher, "", opts, similarity=sim / 100.0)
349 349
350 350 @command('^annotate|blame',
351 351 [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
352 352 ('', 'follow', None,
353 353 _('follow copies/renames and list the filename (DEPRECATED)')),
354 354 ('', 'no-follow', None, _("don't follow copies and renames")),
355 355 ('a', 'text', None, _('treat all files as text')),
356 356 ('u', 'user', None, _('list the author (long with -v)')),
357 357 ('f', 'file', None, _('list the filename')),
358 358 ('d', 'date', None, _('list the date (short with -q)')),
359 359 ('n', 'number', None, _('list the revision number (default)')),
360 360 ('c', 'changeset', None, _('list the changeset')),
361 361 ('l', 'line-number', None, _('show line number at the first appearance'))
362 362 ] + diffwsopts + walkopts + formatteropts,
363 363 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
364 364 inferrepo=True)
365 365 def annotate(ui, repo, *pats, **opts):
366 366 """show changeset information by line for each file
367 367
368 368 List changes in files, showing the revision id responsible for
369 369 each line.
370 370
371 371 This command is useful for discovering when a change was made and
372 372 by whom.
373 373
374 374 If you include --file, --user, or --date, the revision number is
375 375 suppressed unless you also include --number.
376 376
377 377 Without the -a/--text option, annotate will avoid processing files
378 378 it detects as binary. With -a, annotate will annotate the file
379 379 anyway, although the results will probably be neither useful
380 380 nor desirable.
381 381
382 382 Returns 0 on success.
383 383 """
384 384 if not pats:
385 385 raise error.Abort(_('at least one filename or pattern is required'))
386 386
387 387 if opts.get('follow'):
388 388 # --follow is deprecated and now just an alias for -f/--file
389 389 # to mimic the behavior of Mercurial before version 1.5
390 390 opts['file'] = True
391 391
392 392 ctx = scmutil.revsingle(repo, opts.get('rev'))
393 393
394 394 fm = ui.formatter('annotate', opts)
395 395 if ui.quiet:
396 396 datefunc = util.shortdate
397 397 else:
398 398 datefunc = util.datestr
399 399 if ctx.rev() is None:
400 400 def hexfn(node):
401 401 if node is None:
402 402 return None
403 403 else:
404 404 return fm.hexfunc(node)
405 405 if opts.get('changeset'):
406 406 # omit "+" suffix which is appended to node hex
407 407 def formatrev(rev):
408 408 if rev is None:
409 409 return '%d' % ctx.p1().rev()
410 410 else:
411 411 return '%d' % rev
412 412 else:
413 413 def formatrev(rev):
414 414 if rev is None:
415 415 return '%d+' % ctx.p1().rev()
416 416 else:
417 417 return '%d ' % rev
418 418 def formathex(hex):
419 419 if hex is None:
420 420 return '%s+' % fm.hexfunc(ctx.p1().node())
421 421 else:
422 422 return '%s ' % hex
423 423 else:
424 424 hexfn = fm.hexfunc
425 425 formatrev = formathex = str
426 426
427 427 opmap = [('user', ' ', lambda x: x[0].user(), ui.shortuser),
428 428 ('number', ' ', lambda x: x[0].rev(), formatrev),
429 429 ('changeset', ' ', lambda x: hexfn(x[0].node()), formathex),
430 430 ('date', ' ', lambda x: x[0].date(), util.cachefunc(datefunc)),
431 431 ('file', ' ', lambda x: x[0].path(), str),
432 432 ('line_number', ':', lambda x: x[1], str),
433 433 ]
434 434 fieldnamemap = {'number': 'rev', 'changeset': 'node'}
435 435
436 436 if (not opts.get('user') and not opts.get('changeset')
437 437 and not opts.get('date') and not opts.get('file')):
438 438 opts['number'] = True
439 439
440 440 linenumber = opts.get('line_number') is not None
441 441 if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
442 442 raise error.Abort(_('at least one of -n/-c is required for -l'))
443 443
444 444 if fm:
445 445 def makefunc(get, fmt):
446 446 return get
447 447 else:
448 448 def makefunc(get, fmt):
449 449 return lambda x: fmt(get(x))
450 450 funcmap = [(makefunc(get, fmt), sep) for op, sep, get, fmt in opmap
451 451 if opts.get(op)]
452 452 funcmap[0] = (funcmap[0][0], '') # no separator in front of first column
453 453 fields = ' '.join(fieldnamemap.get(op, op) for op, sep, get, fmt in opmap
454 454 if opts.get(op))
455 455
456 456 def bad(x, y):
457 457 raise error.Abort("%s: %s" % (x, y))
458 458
459 459 m = scmutil.match(ctx, pats, opts, badfn=bad)
460 460
461 461 follow = not opts.get('no_follow')
462 462 diffopts = patch.difffeatureopts(ui, opts, section='annotate',
463 463 whitespace=True)
464 464 for abs in ctx.walk(m):
465 465 fctx = ctx[abs]
466 466 if not opts.get('text') and util.binary(fctx.data()):
467 467 fm.plain(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
468 468 continue
469 469
470 470 lines = fctx.annotate(follow=follow, linenumber=linenumber,
471 471 diffopts=diffopts)
472 472 if not lines:
473 473 continue
474 474 formats = []
475 475 pieces = []
476 476
477 477 for f, sep in funcmap:
478 478 l = [f(n) for n, dummy in lines]
479 479 if fm:
480 480 formats.append(['%s' for x in l])
481 481 else:
482 482 sizes = [encoding.colwidth(x) for x in l]
483 483 ml = max(sizes)
484 484 formats.append([sep + ' ' * (ml - w) + '%s' for w in sizes])
485 485 pieces.append(l)
486 486
487 487 for f, p, l in zip(zip(*formats), zip(*pieces), lines):
488 488 fm.startitem()
489 489 fm.write(fields, "".join(f), *p)
490 490 fm.write('line', ": %s", l[1])
491 491
492 492 if not lines[-1][1].endswith('\n'):
493 493 fm.plain('\n')
494 494
495 495 fm.end()
496 496
497 497 @command('archive',
498 498 [('', 'no-decode', None, _('do not pass files through decoders')),
499 499 ('p', 'prefix', '', _('directory prefix for files in archive'),
500 500 _('PREFIX')),
501 501 ('r', 'rev', '', _('revision to distribute'), _('REV')),
502 502 ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
503 503 ] + subrepoopts + walkopts,
504 504 _('[OPTION]... DEST'))
505 505 def archive(ui, repo, dest, **opts):
506 506 '''create an unversioned archive of a repository revision
507 507
508 508 By default, the revision used is the parent of the working
509 509 directory; use -r/--rev to specify a different revision.
510 510
511 511 The archive type is automatically detected based on file
512 512 extension (to override, use -t/--type).
513 513
514 514 .. container:: verbose
515 515
516 516 Examples:
517 517
518 518 - create a zip file containing the 1.0 release::
519 519
520 520 hg archive -r 1.0 project-1.0.zip
521 521
522 522 - create a tarball excluding .hg files::
523 523
524 524 hg archive project.tar.gz -X ".hg*"
525 525
526 526 Valid types are:
527 527
528 528 :``files``: a directory full of files (default)
529 529 :``tar``: tar archive, uncompressed
530 530 :``tbz2``: tar archive, compressed using bzip2
531 531 :``tgz``: tar archive, compressed using gzip
532 532 :``uzip``: zip archive, uncompressed
533 533 :``zip``: zip archive, compressed using deflate
534 534
535 535 The exact name of the destination archive or directory is given
536 536 using a format string; see :hg:`help export` for details.
537 537
538 538 Each member added to an archive file has a directory prefix
539 539 prepended. Use -p/--prefix to specify a format string for the
540 540 prefix. The default is the basename of the archive, with suffixes
541 541 removed.
542 542
543 543 Returns 0 on success.
544 544 '''
545 545
546 546 ctx = scmutil.revsingle(repo, opts.get('rev'))
547 547 if not ctx:
548 548 raise error.Abort(_('no working directory: please specify a revision'))
549 549 node = ctx.node()
550 550 dest = cmdutil.makefilename(repo, dest, node)
551 551 if os.path.realpath(dest) == repo.root:
552 552 raise error.Abort(_('repository root cannot be destination'))
553 553
554 554 kind = opts.get('type') or archival.guesskind(dest) or 'files'
555 555 prefix = opts.get('prefix')
556 556
557 557 if dest == '-':
558 558 if kind == 'files':
559 559 raise error.Abort(_('cannot archive plain files to stdout'))
560 560 dest = cmdutil.makefileobj(repo, dest)
561 561 if not prefix:
562 562 prefix = os.path.basename(repo.root) + '-%h'
563 563
564 564 prefix = cmdutil.makefilename(repo, prefix, node)
565 565 matchfn = scmutil.match(ctx, [], opts)
566 566 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
567 567 matchfn, prefix, subrepos=opts.get('subrepos'))
568 568
569 569 @command('backout',
570 570 [('', 'merge', None, _('merge with old dirstate parent after backout')),
571 571 ('', 'commit', None,
572 572 _('commit if no conflicts were encountered (DEPRECATED)')),
573 573 ('', 'no-commit', None, _('do not commit')),
574 574 ('', 'parent', '',
575 575 _('parent to choose when backing out merge (DEPRECATED)'), _('REV')),
576 576 ('r', 'rev', '', _('revision to backout'), _('REV')),
577 577 ('e', 'edit', False, _('invoke editor on commit messages')),
578 578 ] + mergetoolopts + walkopts + commitopts + commitopts2,
579 579 _('[OPTION]... [-r] REV'))
580 580 def backout(ui, repo, node=None, rev=None, **opts):
581 581 '''reverse effect of earlier changeset
582 582
583 583 Prepare a new changeset with the effect of REV undone in the
584 584 current working directory. If no conflicts were encountered,
585 585 it will be committed immediately.
586 586
587 587 If REV is the parent of the working directory, then this new changeset
588 588 is committed automatically (unless --no-commit is specified).
589 589
590 590 .. note::
591 591
592 592 :hg:`backout` cannot be used to fix either an unwanted or
593 593 incorrect merge.
594 594
595 595 .. container:: verbose
596 596
597 597 Examples:
598 598
599 599 - Reverse the effect of the parent of the working directory.
600 600 This backout will be committed immediately::
601 601
602 602 hg backout -r .
603 603
604 604 - Reverse the effect of previous bad revision 23::
605 605
606 606 hg backout -r 23
607 607
608 608 - Reverse the effect of previous bad revision 23 and
609 609 leave changes uncommitted::
610 610
611 611 hg backout -r 23 --no-commit
612 612 hg commit -m "Backout revision 23"
613 613
614 614 By default, the pending changeset will have one parent,
615 615 maintaining a linear history. With --merge, the pending
616 616 changeset will instead have two parents: the old parent of the
617 617 working directory and a new child of REV that simply undoes REV.
618 618
619 619 Before version 1.7, the behavior without --merge was equivalent
620 620 to specifying --merge followed by :hg:`update --clean .` to
621 621 cancel the merge and leave the child of REV as a head to be
622 622 merged separately.
623 623
624 624 See :hg:`help dates` for a list of formats valid for -d/--date.
625 625
626 626 See :hg:`help revert` for a way to restore files to the state
627 627 of another revision.
628 628
629 629 Returns 0 on success, 1 if nothing to backout or there are unresolved
630 630 files.
631 631 '''
632 632 wlock = lock = None
633 633 try:
634 634 wlock = repo.wlock()
635 635 lock = repo.lock()
636 636 return _dobackout(ui, repo, node, rev, **opts)
637 637 finally:
638 638 release(lock, wlock)
639 639
640 640 def _dobackout(ui, repo, node=None, rev=None, **opts):
641 641 if opts.get('commit') and opts.get('no_commit'):
642 642 raise error.Abort(_("cannot use --commit with --no-commit"))
643 643 if opts.get('merge') and opts.get('no_commit'):
644 644 raise error.Abort(_("cannot use --merge with --no-commit"))
645 645
646 646 if rev and node:
647 647 raise error.Abort(_("please specify just one revision"))
648 648
649 649 if not rev:
650 650 rev = node
651 651
652 652 if not rev:
653 653 raise error.Abort(_("please specify a revision to backout"))
654 654
655 655 date = opts.get('date')
656 656 if date:
657 657 opts['date'] = util.parsedate(date)
658 658
659 659 cmdutil.checkunfinished(repo)
660 660 cmdutil.bailifchanged(repo)
661 661 node = scmutil.revsingle(repo, rev).node()
662 662
663 663 op1, op2 = repo.dirstate.parents()
664 664 if not repo.changelog.isancestor(node, op1):
665 665 raise error.Abort(_('cannot backout change that is not an ancestor'))
666 666
667 667 p1, p2 = repo.changelog.parents(node)
668 668 if p1 == nullid:
669 669 raise error.Abort(_('cannot backout a change with no parents'))
670 670 if p2 != nullid:
671 671 if not opts.get('parent'):
672 672 raise error.Abort(_('cannot backout a merge changeset'))
673 673 p = repo.lookup(opts['parent'])
674 674 if p not in (p1, p2):
675 675 raise error.Abort(_('%s is not a parent of %s') %
676 676 (short(p), short(node)))
677 677 parent = p
678 678 else:
679 679 if opts.get('parent'):
680 680 raise error.Abort(_('cannot use --parent on non-merge changeset'))
681 681 parent = p1
682 682
683 683 # the backout should appear on the same branch
684 684 branch = repo.dirstate.branch()
685 685 bheads = repo.branchheads(branch)
686 686 rctx = scmutil.revsingle(repo, hex(parent))
687 687 if not opts.get('merge') and op1 != node:
688 688 dsguard = cmdutil.dirstateguard(repo, 'backout')
689 689 try:
690 690 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
691 691 'backout')
692 692 stats = mergemod.update(repo, parent, True, True, node, False)
693 693 repo.setparents(op1, op2)
694 694 dsguard.close()
695 695 hg._showstats(repo, stats)
696 696 if stats[3]:
697 697 repo.ui.status(_("use 'hg resolve' to retry unresolved "
698 698 "file merges\n"))
699 699 return 1
700 700 finally:
701 701 ui.setconfig('ui', 'forcemerge', '', '')
702 702 lockmod.release(dsguard)
703 703 else:
704 704 hg.clean(repo, node, show_stats=False)
705 705 repo.dirstate.setbranch(branch)
706 706 cmdutil.revert(ui, repo, rctx, repo.dirstate.parents())
707 707
708 708 if opts.get('no_commit'):
709 709 msg = _("changeset %s backed out, "
710 710 "don't forget to commit.\n")
711 711 ui.status(msg % short(node))
712 712 return 0
713 713
714 714 def commitfunc(ui, repo, message, match, opts):
715 715 editform = 'backout'
716 716 e = cmdutil.getcommiteditor(editform=editform, **opts)
717 717 if not message:
718 718 # we don't translate commit messages
719 719 message = "Backed out changeset %s" % short(node)
720 720 e = cmdutil.getcommiteditor(edit=True, editform=editform)
721 721 return repo.commit(message, opts.get('user'), opts.get('date'),
722 722 match, editor=e)
723 723 newnode = cmdutil.commit(ui, repo, commitfunc, [], opts)
724 724 if not newnode:
725 725 ui.status(_("nothing changed\n"))
726 726 return 1
727 727 cmdutil.commitstatus(repo, newnode, branch, bheads)
728 728
729 729 def nice(node):
730 730 return '%d:%s' % (repo.changelog.rev(node), short(node))
731 731 ui.status(_('changeset %s backs out changeset %s\n') %
732 732 (nice(repo.changelog.tip()), nice(node)))
733 733 if opts.get('merge') and op1 != node:
734 734 hg.clean(repo, op1, show_stats=False)
735 735 ui.status(_('merging with changeset %s\n')
736 736 % nice(repo.changelog.tip()))
737 737 try:
738 738 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
739 739 'backout')
740 740 return hg.merge(repo, hex(repo.changelog.tip()))
741 741 finally:
742 742 ui.setconfig('ui', 'forcemerge', '', '')
743 743 return 0
744 744
@command('bisect',
    [('r', 'reset', False, _('reset bisect state')),
    ('g', 'good', False, _('mark changeset good')),
    ('b', 'bad', False, _('mark changeset bad')),
    ('s', 'skip', False, _('skip testing changeset')),
    ('e', 'extend', False, _('extend the bisect range')),
    ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
    ('U', 'noupdate', False, _('do not update to target'))],
    _("[-gbsr] [-U] [-c CMD] [REV]"))
def bisect(ui, repo, rev=None, extra=None, command=None,
           reset=None, good=None, bad=None, skip=None, extend=None,
           noupdate=None):
    """subdivision search of changesets

    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.

    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.

    If you supply a command, it will be used for automatic bisection.
    The environment variable HG_NODE will contain the ID of the
    changeset being tested. The exit status of the command will be
    used to mark revisions as good or bad: status 0 means good, 125
    means to skip the revision, 127 (command not found) will abort the
    bisection, and any other non-zero exit status means the revision
    is bad.

    .. container:: verbose

      Some examples:

      - start a bisection with known bad revision 34, and good revision 12::

          hg bisect --bad 34
          hg bisect --good 12

      - advance the current bisection by marking current revision as good or
        bad::

          hg bisect --good
          hg bisect --bad

      - mark the current revision, or a known revision, to be skipped (e.g. if
        that revision is not usable because of another issue)::

          hg bisect --skip
          hg bisect --skip 23

      - skip all revisions that do not touch directories ``foo`` or ``bar``::

          hg bisect --skip "!( file('path:foo') & file('path:bar') )"

      - forget the current bisection::

          hg bisect --reset

      - use 'make && make tests' to automatically find the first broken
        revision::

          hg bisect --reset
          hg bisect --bad 34
          hg bisect --good 12
          hg bisect --command "make && make tests"

      - see all changesets whose states are already known in the current
        bisection::

          hg log -r "bisect(pruned)"

      - see the changeset currently being bisected (especially useful
        if running with -U/--noupdate)::

          hg log -r "bisect(current)"

      - see all changesets that took part in the current bisection::

          hg log -r "bisect(range)"

      - you can even get a nice graph::

          hg log --graph -r "bisect(range)"

      See :hg:`help revsets` for more about the `bisect()` keyword.

    Returns 0 on success.
    """
    # The nested helpers below close over the enclosing function's
    # `state` dict ({'good': [...], 'bad': [...], 'skip': [...],
    # 'current': [...]}) and option flags rather than taking them all
    # as parameters.

    def extendbisectrange(nodes, good):
        # bisect is incomplete when it ends on a merge node and
        # one of the parent was not checked.
        # Returns the common-ancestor context to restart from, or None
        # when no extension is needed.
        parents = repo[nodes[0]].parents()
        if len(parents) > 1:
            if good:
                side = state['bad']
            else:
                side = state['good']
            # exactly one parent already marked -> the other side is
            # unexplored; propose the parents' common ancestor
            num = len(set(i.node() for i in parents) & set(side))
            if num == 1:
                return parents[0].ancestor(parents[1])
        return None

    def print_result(nodes, good):
        # Report the outcome of a (possibly finished) bisection:
        # a single culprit, or the candidate set when skips left ambiguity.
        displayer = cmdutil.show_changeset(ui, repo, {})
        if len(nodes) == 1:
            # narrowed it down to a single revision
            if good:
                ui.write(_("The first good revision is:\n"))
            else:
                ui.write(_("The first bad revision is:\n"))
            displayer.show(repo[nodes[0]])
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                ui.write(_('Not all ancestors of this changeset have been'
                           ' checked.\nUse bisect --extend to continue the '
                           'bisection from\nthe common ancestor, %s.\n')
                         % extendnode)
        else:
            # multiple possible revisions
            if good:
                ui.write(_("Due to skipped revisions, the first "
                           "good revision could be any of:\n"))
            else:
                ui.write(_("Due to skipped revisions, the first "
                           "bad revision could be any of:\n"))
            for n in nodes:
                displayer.show(repo[n])
        displayer.close()

    def check_state(state, interactive=True):
        # A bisection needs at least one good and one bad revision.
        # In interactive mode a partially-specified state is tolerated
        # (the user is still marking revisions); otherwise abort.
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise error.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise error.Abort(_('cannot bisect (no known bad revisions)'))
        return True

    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + extend + bool(command) > 1:
        # the action flags are mutually exclusive
        raise error.Abort(_('incompatible arguments'))

    cmdutil.checkunfinished(repo)

    if reset:
        # forget the bisection by removing its on-disk state file
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return

    state = hbisect.load_state(repo)

    if command:
        # automatic mode: run `command` on each candidate and map its
        # exit status to good/bad/skip until the search converges
        changesets = 1
        if noupdate:
            try:
                node = state['current'][0]
            except LookupError:
                raise error.Abort(_('current bisect revision is unknown - '
                                    'start a new bisect to fix'))
        else:
            node, p2 = repo.dirstate.parents()
            if p2 != nullid:
                raise error.Abort(_('current bisect revision is a merge'))
        try:
            while changesets:
                # update state
                state['current'] = [node]
                hbisect.save_state(repo, state)
                status = ui.system(command, environ={'HG_NODE': hex(node)})
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise error.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise error.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                ctx = scmutil.revsingle(repo, rev, node)
                rev = None # clear for future iterations
                state[transition].append(ctx.node())
                ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, bgood = hbisect.bisect(repo.changelog, state)
                # update to next check
                node = nodes[0]
                if not noupdate:
                    cmdutil.bailifchanged(repo)
                    hg.clean(repo, node, show_stats=False)
        finally:
            # persist whatever progress was made, even on abort
            state['current'] = [node]
            hbisect.save_state(repo, state)
        print_result(nodes, bgood)
        return

    # update state

    if rev:
        nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])]
    else:
        # no revision given: mark the working directory parent
        nodes = [repo.lookup('.')]

    if good or bad or skip:
        if good:
            state['good'] += nodes
        elif bad:
            state['bad'] += nodes
        elif skip:
            state['skip'] += nodes
        hbisect.save_state(repo, state)

    if not check_state(state):
        return

    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if extend:
        if not changesets:
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                ui.write(_("Extending search to changeset %d:%s\n")
                         % (extendnode.rev(), extendnode))
                state['current'] = [extendnode.node()]
                hbisect.save_state(repo, state)
                if noupdate:
                    return
                cmdutil.bailifchanged(repo)
                return hg.clean(repo, extendnode.node())
        raise error.Abort(_("nothing to extend"))

    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        state['current'] = [node]
        hbisect.save_state(repo, state)
        if not noupdate:
            cmdutil.bailifchanged(repo)
            return hg.clean(repo, node)
@command('bookmarks|bookmark',
    [('f', 'force', False, _('force')),
    ('r', 'rev', '', _('revision for bookmark action'), _('REV')),
    ('d', 'delete', False, _('delete a given bookmark')),
    ('m', 'rename', '', _('rename a given bookmark'), _('OLD')),
    ('i', 'inactive', False, _('mark a bookmark inactive')),
    ] + formatteropts,
    _('hg bookmarks [OPTIONS]... [NAME]...'))
def bookmark(ui, repo, *names, **opts):
    '''create a new bookmark or list existing bookmarks

    Bookmarks are labels on changesets to help track lines of development.
    Bookmarks are unversioned and can be moved, renamed and deleted.
    Deleting or moving a bookmark has no effect on the associated changesets.

    Creating or updating to a bookmark causes it to be marked as 'active'.
    The active bookmark is indicated with a '*'.
    When a commit is made, the active bookmark will advance to the new commit.
    A plain :hg:`update` will also advance an active bookmark, if possible.
    Updating away from a bookmark will cause it to be deactivated.

    Bookmarks can be pushed and pulled between repositories (see
    :hg:`help push` and :hg:`help pull`). If a shared bookmark has
    diverged, a new 'divergent bookmark' of the form 'name@path' will
    be created. Using :hg:`merge` will resolve the divergence.

    A bookmark named '@' has the special property that :hg:`clone` will
    check it out by default if it exists.

    .. container:: verbose

      Examples:

      - create an active bookmark for a new line of development::

          hg book new-feature

      - create an inactive bookmark as a place marker::

          hg book -i reviewed

      - create an inactive bookmark on another changeset::

          hg book -r .^ tested

      - rename bookmark turkey to dinner::

          hg book -m turkey dinner

      - move the '@' bookmark from another branch::

          hg book -f @
    '''
    force = opts.get('force')
    rev = opts.get('rev')
    delete = opts.get('delete')
    rename = opts.get('rename')
    inactive = opts.get('inactive')

    def checkformat(mark):
        # Validate and normalize a bookmark name; aborts on an invalid one.
        mark = mark.strip()
        if not mark:
            raise error.Abort(_("bookmark names cannot consist entirely of "
                                "whitespace"))
        scmutil.checknewlabel(repo, mark, 'bookmark')
        return mark

    def checkconflict(repo, mark, cur, force=False, target=None):
        # Abort if setting `mark` would clobber an existing bookmark or
        # shadow a branch name; also resolves divergent 'name@path'
        # bookmarks when moving forward. Closes over `marks` assigned in
        # the caller before this is invoked.
        if mark in marks and not force:
            if target:
                if marks[mark] == target and target == cur:
                    # re-activating a bookmark
                    return
                anc = repo.changelog.ancestors([repo[target].rev()])
                bmctx = repo[marks[mark]]
                divs = [repo[b].node() for b in marks
                        if b.split('@', 1)[0] == mark.split('@', 1)[0]]

                # allow resolving a single divergent bookmark even if moving
                # the bookmark across branches when a revision is specified
                # that contains a divergent bookmark
                if bmctx.rev() not in anc and target in divs:
                    bookmarks.deletedivergent(repo, [target], mark)
                    return

                deletefrom = [b for b in divs
                              if repo[b].rev() in anc or b == target]
                bookmarks.deletedivergent(repo, deletefrom, mark)
                if bookmarks.validdest(repo, bmctx, repo[target]):
                    ui.status(_("moving bookmark '%s' forward from %s\n") %
                              (mark, short(bmctx.node())))
                    return
            raise error.Abort(_("bookmark '%s' already exists "
                                "(use -f to force)") % mark)
        if ((mark in repo.branchmap() or mark == repo.dirstate.branch())
            and not force):
            raise error.Abort(
                _("a bookmark cannot have the name of an existing branch"))

    # reject incompatible option combinations up front
    if delete and rename:
        raise error.Abort(_("--delete and --rename are incompatible"))
    if delete and rev:
        raise error.Abort(_("--rev is incompatible with --delete"))
    if rename and rev:
        raise error.Abort(_("--rev is incompatible with --rename"))
    if not names and (delete or rev):
        raise error.Abort(_("bookmark name required"))

    if delete or rename or names or inactive:
        # mutating path: take both locks, run inside one transaction
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            cur = repo.changectx('.').node()
            marks = repo._bookmarks
            if delete:
                tr = repo.transaction('bookmark')
                for mark in names:
                    if mark not in marks:
                        raise error.Abort(_("bookmark '%s' does not exist") %
                                          mark)
                    if mark == repo._activebookmark:
                        bookmarks.deactivate(repo)
                    del marks[mark]

            elif rename:
                tr = repo.transaction('bookmark')
                if not names:
                    raise error.Abort(_("new bookmark name required"))
                elif len(names) > 1:
                    raise error.Abort(_("only one new bookmark name allowed"))
                mark = checkformat(names[0])
                if rename not in marks:
                    raise error.Abort(_("bookmark '%s' does not exist")
                                      % rename)
                checkconflict(repo, mark, cur, force)
                marks[mark] = marks[rename]
                if repo._activebookmark == rename and not inactive:
                    bookmarks.activate(repo, mark)
                del marks[rename]
            elif names:
                tr = repo.transaction('bookmark')
                newact = None
                for mark in names:
                    mark = checkformat(mark)
                    if newact is None:
                        newact = mark
                    if inactive and mark == repo._activebookmark:
                        # -i on the already-active bookmark: just deactivate
                        bookmarks.deactivate(repo)
                        return
                    tgt = cur
                    if rev:
                        tgt = scmutil.revsingle(repo, rev).node()
                    checkconflict(repo, mark, cur, force, tgt)
                    marks[mark] = tgt
                if not inactive and cur == marks[newact] and not rev:
                    bookmarks.activate(repo, newact)
                elif cur != tgt and newact == repo._activebookmark:
                    bookmarks.deactivate(repo)
            elif inactive:
                if len(marks) == 0:
                    ui.status(_("no bookmarks set\n"))
                elif not repo._activebookmark:
                    ui.status(_("no active bookmark\n"))
                else:
                    bookmarks.deactivate(repo)
            if tr is not None:
                marks.recordchange(tr)
                tr.close()
        finally:
            lockmod.release(tr, lock, wlock)
    else: # show bookmarks
        fm = ui.formatter('bookmarks', opts)
        hexfn = fm.hexfunc
        marks = repo._bookmarks
        if len(marks) == 0 and not fm:
            ui.status(_("no bookmarks set\n"))
        for bmark, n in sorted(marks.iteritems()):
            active = repo._activebookmark
            if bmark == active:
                prefix, label = '*', activebookmarklabel
            else:
                prefix, label = ' ', ''

            fm.startitem()
            if not ui.quiet:
                fm.plain(' %s ' % prefix, label=label)
            fm.write('bookmark', '%s', bmark, label=label)
            # pad bookmark names to a 25-column field before 'rev:node'
            pad = " " * (25 - encoding.colwidth(bmark))
            fm.condwrite(not ui.quiet, 'rev node', pad + ' %d:%s',
                         repo.changelog.rev(n), hexfn(n), label=label)
            fm.data(active=(bmark == active))
            fm.plain('\n')
        fm.end()
1207 1207
@command('branch',
    [('f', 'force', None,
     _('set branch name even if it shadows an existing branch')),
    ('C', 'clean', None, _('reset branch name to parent branch name'))],
    _('[-fC] [NAME]'))
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    .. note::

       Branch names are permanent and global. Use :hg:`bookmark` to create a
       light-weight bookmark instead. See :hg:`help glossary` for more
       information about named branches and bookmarks.

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch will not exist
    in the repository until the next commit). Standard practice
    recommends that primary development take place on the 'default'
    branch.

    Unless -f/--force is specified, branch will not let you set a
    branch name that already exists.

    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.

    Use the command :hg:`update` to switch to an existing branch. Use
    :hg:`commit --close-branch` to mark this branch head as closed.
    When all heads of a branch are closed, the branch will be
    considered closed.

    Returns 0 on success.
    """
    if label:
        label = label.strip()

    # with neither a name nor --clean, just report the current branch
    if not label and not opts.get('clean'):
        ui.write("%s\n" % repo.dirstate.branch())
        return

    with repo.wlock():
        if opts.get('clean'):
            # revert the working-directory branch to the first parent's
            label = repo[None].p1().branch()
            repo.dirstate.setbranch(label)
            ui.status(_('reset working directory to branch %s\n') % label)
        elif label:
            if not opts.get('force') and label in repo.branchmap():
                parentbranches = [p.branch() for p in repo[None].parents()]
                if label not in parentbranches:
                    raise error.Abort(_('a branch of the same name already'
                                        ' exists'),
                                      # i18n: "it" refers to an existing branch
                                      hint=_("use 'hg update' to switch to it"))
            scmutil.checknewlabel(repo, label, 'branch')
            repo.dirstate.setbranch(label)
            ui.status(_('marked working directory as branch %s\n') % label)

            # hint at bookmarks unless some open non-default branch exists
            hasothers = any(name != "default" and not closed
                            for name, heads, tip, closed
                            in repo.branchmap().iterbranches())
            if not hasothers:
                ui.status(_('(branches are permanent and global, '
                            'did you want a bookmark?)\n'))
1271 1271
@command('branches',
    [('a', 'active', False,
     _('show only branches that have unmerged heads (DEPRECATED)')),
    ('c', 'closed', False, _('show normal and closed branches')),
    ] + formatteropts,
    _('[-c]'))
def branches(ui, repo, active=False, closed=False, **opts):
    """list repository named branches

    List the repository's named branches, indicating which ones are
    inactive. If -c/--closed is specified, also list branches which have
    been marked closed (see :hg:`commit --close-branch`).

    Use the command :hg:`update` to switch to an existing branch.

    Returns 0.
    """

    fm = ui.formatter('branches', opts)
    hexfunc = fm.hexfunc

    # a branch is active when it is open and owns a repository head
    repoheads = set(repo.heads())
    entries = [(name, repo[tip],
                not isclosed and bool(set(heads) & repoheads),
                not isclosed)
               for name, heads, tip, isclosed
               in repo.branchmap().iterbranches()]
    # active branches first, then newest tip first
    entries = sorted(entries,
                     key=lambda e: (e[2], e[1].rev(), e[0], e[3]),
                     reverse=True)

    for name, tipctx, isactive, isopen in entries:
        if active and not isactive:
            continue
        notice = ''
        if isactive:
            label = 'branches.active'
        elif not isopen:
            if not closed:
                # closed branches are hidden unless -c/--closed
                continue
            label = 'branches.closed'
            notice = _(' (closed)')
        else:
            label = 'branches.inactive'
            notice = _(' (inactive)')
        current = (name == repo.dirstate.branch())
        if current:
            label = 'branches.current'

        fm.startitem()
        fm.write('branch', '%s', name, label=label)
        rev = tipctx.rev()
        # pad so the 'rev:node' column lines up across rows
        padsize = max(31 - len(str(rev)) - encoding.colwidth(name), 0)
        fmt = ' ' * padsize + ' %d:%s'
        fm.condwrite(not ui.quiet, 'rev node', fmt, rev,
                     hexfunc(tipctx.node()),
                     label='log.changeset changeset.%s' % tipctx.phasestr())
        fm.data(active=isactive, closed=not isopen, current=current)
        if not ui.quiet:
            fm.plain(notice)
        fm.plain('\n')
    fm.end()
1331 1331
@command('bundle',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [], _('a changeset intended to be added to the destination'),
     _('REV')),
    ('b', 'branch', [], _('a specific branch you would like to bundle'),
     _('BRANCH')),
    ('', 'base', [],
     _('a base changeset assumed to be available at the destination'),
     _('REV')),
    ('a', 'all', None, _('bundle all changesets in the repository')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
    ] + remoteopts,
    _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file

    Generate a changegroup file collecting changesets to be added
    to a repository.

    To create a bundle containing all changesets, use -a/--all
    (or --base null). Otherwise, hg assumes the destination will have
    all the nodes you specify with --base parameters. Otherwise, hg
    will assume the repository has all the nodes in destination, or
    default-push/default if no destination is specified.

    You can change bundle format with the -t/--type option. You can
    specify a compression, a bundle version or both using a dash
    (comp-version). The available compression methods are: none, bzip2,
    and gzip (by default, bundles are compressed using bzip2). The
    available formats are: v1, v2 (default to most suitable).

    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.

    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.

    Returns 0 on success, 1 if no changes found.
    """
    # NOTE(review): the rendered diff for r29807 left both the old
    # heads/common keyword call and the new outgoing-based call of
    # changegroup.getchangegroup in the text; this body keeps only the
    # post-change (outgoing) form, matching the commit message.
    revs = None
    if 'rev' in opts:
        revstrings = opts['rev']
        revs = scmutil.revrange(repo, revstrings)
        if revstrings and not revs:
            raise error.Abort(_('no commits to bundle'))

    bundletype = opts.get('type', 'bzip2').lower()
    try:
        bcompression, cgversion, params = exchange.parsebundlespec(
            repo, bundletype, strict=False)
    except error.UnsupportedBundleSpecification as e:
        raise error.Abort(str(e),
                          hint=_('see "hg help bundle" for supported '
                                 'values for --type'))

    # Packed bundles are a pseudo bundle format for now.
    if cgversion == 's1':
        raise error.Abort(_('packed bundles cannot be produced by "hg bundle"'),
                          hint=_("use 'hg debugcreatestreamclonebundle'"))

    if opts.get('all'):
        if dest:
            raise error.Abort(_("--all is incompatible with specifying "
                                "a destination"))
        if opts.get('base'):
            ui.warn(_("ignoring --base because --all was specified\n"))
        # 'null' as the sole base means everything is outgoing
        base = ['null']
    else:
        base = scmutil.revrange(repo, opts.get('base'))
    # TODO: get desired bundlecaps from command line.
    bundlecaps = None
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        raise error.Abort(_("repository does not support bundle version %s") %
                          cgversion)

    if base:
        # explicit bases: no remote discovery, build the outgoing set locally
        if dest:
            raise error.Abort(_("--base is incompatible with specifying "
                                "a destination"))
        common = [repo.lookup(rev) for rev in base]
        heads = revs and map(repo.lookup, revs) or revs
        outgoing = discovery.outgoing(repo, common, heads)
        cg = changegroup.getchangegroup(repo, 'bundle', outgoing,
                                        bundlecaps=bundlecaps,
                                        version=cgversion)
        # keep the old "no changes found" reporting (no excluded info)
        # for the --base path
        outgoing = None
    else:
        # discover the outgoing set against the destination repository
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.peer(repo, opts, dest)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
        heads = revs and map(repo.lookup, revs) or revs
        outgoing = discovery.findcommonoutgoing(repo, other,
                                                onlyheads=heads,
                                                force=opts.get('force'),
                                                portable=True)
        cg = changegroup.getlocalchangegroup(repo, 'bundle', outgoing,
                                             bundlecaps, version=cgversion)
    if not cg:
        scmutil.nochangesfound(ui, repo, outgoing and outgoing.excluded)
        return 1

    if cgversion == '01': #bundle1
        if bcompression is None:
            bcompression = 'UN'
        # bundle1 encodes the compression in the header, e.g. 'HG10BZ'
        bversion = 'HG10' + bcompression
        bcompression = None
    else:
        assert cgversion == '02'
        bversion = 'HG20'

    bundle2.writebundle(ui, cg, fname, bversion, compression=bcompression)
1445 1446
@command('cat',
    [('o', 'output', '',
     _('print output to file with formatted name'), _('FORMAT')),
    ('r', 'rev', '', _('print the given revision'), _('REV')),
    ('', 'decode', None, _('apply any matching decode filter')),
    ] + walkopts,
    _('[OPTION]... FILE...'),
    inferrepo=True)
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files

    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules as follows:

    :``%%``: literal "%" character
    :``%s``: basename of file being printed
    :``%d``: dirname of file being printed, or '.' if in repository root
    :``%p``: root-relative path name of file being printed
    :``%H``: changeset hash (40 hexadecimal digits)
    :``%R``: changeset revision number
    :``%h``: short-form changeset hash (12 hexadecimal digits)
    :``%r``: zero-padded changeset revision number
    :``%b``: basename of the exporting repository

    Returns 0 on success.
    """
    # resolve the revision to read from (--rev, or the working parent)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    # build a matcher over the named file plus any extra patterns
    matcher = scmutil.match(ctx, (file1,) + pats, opts)
    return cmdutil.cat(ui, repo, ctx, matcher, '', **opts)
1479 1480
@command('^clone',
    [('U', 'noupdate', None, _('the clone will include an empty working '
                               'directory (only a repository)')),
    ('u', 'updaterev', '', _('revision, tag, or branch to check out'),
     _('REV')),
    ('r', 'rev', [], _('include the specified changeset'), _('REV')),
    ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
    ('', 'pull', None, _('use pull protocol to copy metadata')),
    ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')),
    ] + remoteopts,
    _('[OPTION]... SOURCE [DEST]'),
    norepo=True)
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    ``.hg/hgrc`` file, as the default to be used for future pulls.

    Only local paths and ``ssh://`` URLs are supported as
    destinations. For ``ssh://`` destinations, no working directory or
    ``.hg/hgrc`` will be created on the remote side.

    If the source repository has a bookmark called '@' set, that
    revision will be checked out in the new repository by default.

    To check out a particular version, use -u/--update, or
    -U/--noupdate to create a clone with no working directory.

    To pull only a subset of changesets, specify one or more revisions
    identifiers with -r/--rev or branches with -b/--branch. The
    resulting clone will contain only the specified changesets and
    their ancestors. These options (or 'clone src#rev dest') imply
    --pull, even for local source repositories.

    .. note::

       Specifying a tag will include the tagged changeset but not the
       changeset containing the tag.

    .. container:: verbose

      For efficiency, hardlinks are used for cloning whenever the
      source and destination are on the same filesystem (note this
      applies only to the repository data, not to the working
      directory). Some filesystems, such as AFS, implement hardlinking
      incorrectly, but do not report errors. In these cases, use the
      --pull option to avoid hardlinking.

      In some cases, you can clone repositories and the working
      directory using full hardlinks with ::

        $ cp -al REPO REPOCLONE

      This is the fastest way to clone, but it is not always safe. The
      operation is not atomic (making sure REPO is not modified during
      the operation is up to you) and you have to make sure your
      editor breaks hardlinks (Emacs and most Linux Kernel tools do
      so). Also, this is not compatible with certain extensions that
      place their metadata under the .hg directory, such as mq.

      Mercurial will update the working directory to the first applicable
      revision from this list:

      a) null if -U or the source repository has no changesets
      b) if -u . and the source repository is local, the first parent of
         the source repository's working directory
      c) the changeset specified with -u (if a branch name, this means the
         latest head of that branch)
      d) the changeset specified with -r
      e) the tipmost head specified with -b
      f) the tipmost head specified with the url#branch source syntax
      g) the revision marked with the '@' bookmark, if present
      h) the tipmost head of the default branch
      i) tip

      When cloning from servers that support it, Mercurial may fetch
      pre-generated data from a server-advertised URL. When this is done,
      hooks operating on incoming changesets and changegroups may fire twice,
      once for the bundle fetched from the URL and another for any additional
      data not fetched from this URL. In addition, if an error occurs, the
      repository may be rolled back to a partial clone. This behavior may
      change in future releases. See :hg:`help -e clonebundles` for more.

      Examples:

      - clone a remote repository to a new directory named hg/::

          hg clone http://selenic.com/hg

      - create a lightweight local clone::

          hg clone project/ project-feature/

      - clone from an absolute path on an ssh server (note double-slash)::

          hg clone ssh://user@server//home/projects/alpha/

      - do a high-speed clone over a LAN while checking out a
        specified version::

          hg clone --uncompressed http://server/repo -u 1.5

      - create a repository without changesets after a particular revision::

          hg clone -r 04e544 experimental/ good/

      - clone (and track) a particular named branch::

          hg clone http://selenic.com/hg#stable

    See :hg:`help urls` for details on specifying URLs.

    Returns 0 on success.
    """
    # -U and -u are contradictory: one asks for no working directory,
    # the other asks for a specific checkout.
    if opts.get('noupdate') and opts.get('updaterev'):
        raise error.Abort(_("cannot specify both --noupdate and --updaterev"))

    # All real work is delegated to hg.clone(); 'update' is the checkout
    # target (--updaterev) or a boolean (checkout unless -U was given).
    r = hg.clone(ui, opts, source, dest,
                 pull=opts.get('pull'),
                 stream=opts.get('uncompressed'),
                 rev=opts.get('rev'),
                 update=opts.get('updaterev') or not opts.get('noupdate'),
                 branch=opts.get('branch'),
                 shareopts=opts.get('shareopts'))

    # hg.clone returns None on success; command exit codes invert that.
    return r is None
1611 1612
@command('^commit|ci',
    [('A', 'addremove', None,
      _('mark new/missing files as added/removed before committing')),
     ('', 'close-branch', None,
      _('mark a branch head as closed')),
     ('', 'amend', None, _('amend the parent of the working directory')),
     ('s', 'secret', None, _('use the secret phase for committing')),
     ('e', 'edit', None, _('invoke editor on commit messages')),
     ('i', 'interactive', None, _('use interactive mode')),
    ] + walkopts + commitopts + commitopts2 + subrepoopts,
    _('[OPTION]... [FILE]...'),
    inferrepo=True)
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository. Unlike a
    centralized SCM, this operation is a local operation. See
    :hg:`push` for a way to actively distribute your changes.

    If a list of files is omitted, all changes reported by :hg:`status`
    will be committed.

    If you are committing the result of a merge, do not provide any
    filenames or -I/-X filters.

    If no commit message is specified, Mercurial starts your
    configured editor where you can enter a message. In case your
    commit fails, you will find a backup of your message in
    ``.hg/last-message.txt``.

    The --close-branch flag can be used to mark the current branch
    head closed. When all heads of a branch are closed, the branch
    will be considered closed and no longer listed.

    The --amend flag can be used to amend the parent of the
    working directory with a new commit that contains the changes
    in the parent in addition to those currently reported by :hg:`status`,
    if there are any. The old commit is stored in a backup bundle in
    ``.hg/strip-backup`` (see :hg:`help bundle` and :hg:`help unbundle`
    on how to restore it).

    Message, user and date are taken from the amended commit unless
    specified. When a message isn't specified on the command line,
    the editor will open with the message of the amended commit.

    It is not possible to amend public changesets (see :hg:`help phases`)
    or changesets that have children.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if nothing changed.

    .. container:: verbose

      Examples:

      - commit all files ending in .py::

          hg commit --include "set:**.py"

      - commit all non-binary files::

          hg commit --exclude "set:binary()"

      - amend the current commit and set the date to now::

          hg commit --amend --date now
    """
    # Acquire the working-directory lock and then the store lock for the
    # whole operation; the actual commit logic lives in _docommit().
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        return _docommit(ui, repo, *pats, **opts)
    finally:
        # release() in reverse acquisition order; tolerates None entries.
        release(lock, wlock)
1687 1688
def _docommit(ui, repo, *pats, **opts):
    """Implementation of the commit command.

    Expects the caller (commit()) to already hold the wlock and lock.
    Returns 1 when nothing changed, otherwise falls through to
    cmdutil.commitstatus() (implicit None return).
    """
    # --interactive is handled by the record machinery, which re-enters
    # this command (via 'commit') without the flag.
    if opts.get('interactive'):
        opts.pop('interactive')
        cmdutil.dorecord(ui, repo, commit, None, False,
                         cmdutil.recordfilter, *pats, **opts)
        return

    if opts.get('subrepos'):
        if opts.get('amend'):
            raise error.Abort(_('cannot amend with --subrepos'))
        # Let --subrepos on the command line override config setting.
        ui.setconfig('ui', 'commitsubrepos', True, 'commit')

    cmdutil.checkunfinished(repo, commit=True)

    branch = repo[None].branch()
    bheads = repo.branchheads(branch)

    extra = {}
    if opts.get('close_branch'):
        extra['close'] = 1

        # Closing is only meaningful on an existing branch head; when
        # amending, one of the parents' parents must be on this branch.
        if not bheads:
            raise error.Abort(_('can only close branch heads'))
        elif opts.get('amend'):
            if repo[None].parents()[0].p1().branch() != branch and \
                    repo[None].parents()[0].p2().branch() != branch:
                raise error.Abort(_('can only close branch heads'))

    if opts.get('amend'):
        if ui.configbool('ui', 'commitsubrepos'):
            raise error.Abort(_('cannot amend with ui.commitsubrepos enabled'))

        old = repo['.']
        if not old.mutable():
            raise error.Abort(_('cannot amend public changesets'))
        if len(repo[None].parents()) > 1:
            raise error.Abort(_('cannot amend while merging'))
        allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
        if not allowunstable and old.children():
            raise error.Abort(_('cannot amend changeset with children'))

        # Currently histedit gets confused if an amend happens while histedit
        # is in progress. Since we have a checkunfinished command, we are
        # temporarily honoring it.
        #
        # Note: eventually this guard will be removed. Please do not expect
        # this behavior to remain.
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            cmdutil.checkunfinished(repo)

        # commitfunc is used only for temporary amend commit by cmdutil.amend
        def commitfunc(ui, repo, message, match, opts):
            return repo.commit(message,
                               opts.get('user') or old.user(),
                               opts.get('date') or old.date(),
                               match,
                               extra=extra)

        node = cmdutil.amend(ui, repo, commitfunc, old, extra, pats, opts)
        # amend returns the old node unchanged when there was nothing to do
        if node == old.node():
            ui.status(_("nothing changed\n"))
            return 1
    else:
        def commitfunc(ui, repo, message, match, opts):
            # Temporarily force the 'secret' phase for new commits when
            # -s/--secret was given; restored in the finally block below.
            backup = ui.backupconfig('phases', 'new-commit')
            baseui = repo.baseui
            basebackup = baseui.backupconfig('phases', 'new-commit')
            try:
                if opts.get('secret'):
                    ui.setconfig('phases', 'new-commit', 'secret', 'commit')
                    # Propagate to subrepos
                    baseui.setconfig('phases', 'new-commit', 'secret', 'commit')

                editform = cmdutil.mergeeditform(repo[None], 'commit.normal')
                editor = cmdutil.getcommiteditor(editform=editform, **opts)
                return repo.commit(message, opts.get('user'), opts.get('date'),
                                   match,
                                   editor=editor,
                                   extra=extra)
            finally:
                ui.restoreconfig(backup)
                repo.baseui.restoreconfig(basebackup)


        node = cmdutil.commit(ui, repo, commitfunc, pats, opts)

    if not node:
        # Distinguish "nothing changed because files are missing" from a
        # plain no-op commit; stat[3] is the list of missing files.
        stat = cmdutil.postcommitstatus(repo, pats, opts)
        if stat[3]:
            ui.status(_("nothing changed (%d missing files, see "
                        "'hg status')\n") % len(stat[3]))
        else:
            ui.status(_("nothing changed\n"))
        return 1

    cmdutil.commitstatus(repo, node, branch, bheads, opts)
1785 1786
@command('config|showconfig|debugconfig',
    [('u', 'untrusted', None, _('show untrusted configuration options')),
     ('e', 'edit', None, _('edit user config')),
     ('l', 'local', None, _('edit repository config')),
     ('g', 'global', None, _('edit global config'))],
    _('[-u] [NAME]...'),
    optionalrepo=True)
def config(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no arguments, print names and values of all config items.

    With one argument of the form section.name, print just the value
    of that config item.

    With multiple arguments, print names and values of all config
    items with matching section names.

    With --edit, start an editor on the user-level config file. With
    --global, edit the system-wide config file. With --local, edit the
    repository-level config file.

    With --debug, the source (filename and line number) is printed
    for each config item.

    See :hg:`help config` for more information about config files.

    Returns 0 on success, 1 if NAME does not exist.

    """

    # Edit mode: pick the config file for the requested scope, create it
    # from a sample if it does not exist yet, and launch the editor.
    if opts.get('edit') or opts.get('local') or opts.get('global'):
        if opts.get('local') and opts.get('global'):
            raise error.Abort(_("can't use --local and --global together"))

        if opts.get('local'):
            if not repo:
                raise error.Abort(_("can't use --local outside a repository"))
            paths = [repo.join('hgrc')]
        elif opts.get('global'):
            paths = scmutil.systemrcpath()
        else:
            paths = scmutil.userrcpath()

        # for-else: the else runs only when no candidate path exists,
        # in which case we seed the first path with a sample config.
        for f in paths:
            if os.path.exists(f):
                break
        else:
            if opts.get('global'):
                samplehgrc = uimod.samplehgrcs['global']
            elif opts.get('local'):
                samplehgrc = uimod.samplehgrcs['local']
            else:
                samplehgrc = uimod.samplehgrcs['user']

            f = paths[0]
            fp = open(f, "w")
            fp.write(samplehgrc)
            fp.close()

        editor = ui.geteditor()
        ui.system("%s \"%s\"" % (editor, f),
                  onerr=error.Abort, errprefix=_("edit failed"))
        return

    # Display mode: list where config was read from (debug only), then
    # walk all effective settings and filter by the given names.
    for f in scmutil.rcpath():
        ui.debug('read config from: %s\n' % f)
    untrusted = bool(opts.get('untrusted'))
    if values:
        # section names contain no dot; item names are section.name;
        # at most one fully-qualified item may be requested.
        sections = [v for v in values if '.' not in v]
        items = [v for v in values if '.' in v]
        if len(items) > 1 or items and sections:
            raise error.Abort(_('only one config item permitted'))
    matched = False
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        value = str(value).replace('\n', '\\n')
        sectname = section + '.' + name
        if values:
            for v in values:
                if v == section:
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write('%s=%s\n' % (sectname, value))
                    matched = True
                elif v == sectname:
                    # an exact item match prints the bare value only
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write(value, '\n')
                    matched = True
        else:
            ui.debug('%s: ' %
                     ui.configsource(section, name, untrusted))
            ui.write('%s=%s\n' % (sectname, value))
            matched = True
    if matched:
        return 0
    return 1
1883 1884
@command('copy|cp',
    [('A', 'after', None, _('record a copy that has already occurred')),
     ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [SOURCE]... DEST'))
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect with the next commit. To undo a copy
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    # Hold the working-directory lock (non-waiting acquire semantics of
    # wlock(False)) while cmdutil.copy does the actual work.
    wlock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        wlock.release()
1907 1908
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs not in (2, 3):
        raise error.Abort(_('either two or three arguments required'))
    if nargs == 3:
        # an explicit revlog index file was given on the command line
        index, rev1, rev2 = args
        rl = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
        lookup = rl.lookup
    else:
        # no index file: fall back to the current repository's changelog
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here "
                                "(.hg not found)"))
        rev1, rev2 = args
        rl = repo.changelog
        lookup = repo.lookup
    ancestor = rl.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (rl.rev(ancestor), hex(ancestor)))
1926 1927
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
           otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []

    # build everything inside one transaction so a parse error or commit
    # failure leaves the (empty) repository untouched
    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("builddag")

        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        ui.progress(_('building'), id, unit=_('revisions'), total=total)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                # 'n' event: create one commit; ps are parent backrefs
                ui.note(('node %s\n' % str(data)))
                id, ps = data

                files = []
                fctxs = {}

                p2 = None
                if mergeable_file:
                    # keep a single file "mf" whose lines merge cleanly
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, mergedtext)

                if overwritten_file:
                    # file "of" is fully rewritten at every revision
                    fn = "of"
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)

                if new_file:
                    # one brand-new file per revision; merges also carry
                    # over the second parent's nf* files
                    fn = "nf%i" % id
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                fctxs[fn] = p2[fn]

                def fctxfn(repo, cx, path):
                    return fctxs.get(path)

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # 'l' event: record a local tag for node id
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # 'a' event: switch the named branch for subsequent nodes
                ui.note(('branch %s\n' % data))
                atbranch = data
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()

        if tags:
            repo.vfs.write("localtags", "".join(tags))
    finally:
        ui.progress(_('building'), None)
        release(tr, lock, wlock)
2078 2079
@command('debugbundle',
        [('a', 'all', None, _('show all details')),
         ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # only report the bundlespec; do not dump any content
            ui.write('%s\n' % exchange.getbundlespec(ui, f))
            return
        bundle = exchange.readbundle(ui, f, bundlepath)
        # bundle2 files have their own part-based dumper
        if isinstance(bundle, bundle2.unbundle20):
            return _debugbundle2(ui, bundle, all=all, **opts)
        _debugchangegroup(ui, bundle, all=all, **opts)
2096 2097
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """Dump the contents of changegroup unbundler 'gen' to the ui.

    With 'all' set, every delta chunk of every revlog group is printed
    (node, parents, cset, delta base, delta length); otherwise only the
    changelog node hashes are listed. 'indent' prefixes each output line
    (used when nested inside a bundle2 part listing).

    NOTE(review): consuming the chunks advances 'gen'; the stream cannot
    be re-read afterwards — confirm callers do not reuse it.
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # print one group (changelog/manifest/one filelog); the
            # iter(callable, {}) idiom stops at the empty-dict sentinel
            # that deltachunk returns at end of group
            ui.write("\n%s%s\n" % (indent_string, named))
            chain = None
            for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']
                ui.write("%s%s %s %s %s %s %s\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))
                # each delta chains off the previously seen node
                chain = node

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        chain = None
        for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
            node = chunkdata['node']
            ui.write("%s%s\n" % (indent_string, hex(node)))
            chain = node
2134 2135
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % repr(gen.params)))
    for part in gen.iterparts():
        ui.write('%s -- %r\n' % (part.type, repr(part.params)))
        if part.type != 'changegroup':
            continue
        # recurse into embedded changegroup parts, indented under the
        # part header line
        cgversion = part.params.get('version', '01')
        unbundler = changegroup.getunbundler(cgversion, part, 'UN')
        _debugchangegroup(ui, unbundler, all=all, indent=4, **opts)
2146 2147
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # generate the v1 stream bundle and write its chunks straight to fname
    requirements, chunks = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, chunks, fname)

    reqstr = ', '.join(sorted(requirements))
    ui.write(_('bundle requirements: %s\n') % reqstr)
2158 2159
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # Use a context manager so the bundle file handle is closed even when
    # reading or applying fails; the previous code leaked the handle
    # (sibling command debugbundle already uses 'with' for this).
    with hg.openpath(ui, fname) as f:
        gen = exchange.readbundle(ui, f, fname)
        gen.apply(repo)
2165 2166
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    # cross-check every dirstate entry against the parent manifests
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # and the reverse direction: every manifest1 file must be tracked
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # BUG FIX: the message local used to be named 'error', shadowing
        # the imported 'error' module, so 'error.Abort' raised
        # AttributeError (on a str) instead of the intended Abort.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
2193 2194
@command('debugcommands', [], _('[COMMAND]'), norepo=True)
def debugcommands(ui, cmd='', *args):
    """list all available commands and options"""
    # 'cmd' and 'args' are accepted for CLI compatibility but unused.
    # The loop variable previously reused (and shadowed) the 'cmd'
    # parameter; use distinct names for the table key and display name.
    for key, vals in sorted(table.iteritems()):
        name = key.split('|')[0].strip('^')
        optnames = ', '.join([i[1] for i in vals[1]])
        ui.write('%s: %s\n' % (name, optnames))
2201 2202
@command('debugcomplete',
    [('o', 'options', None, _('show the command options'))],
    _('[-o] CMD'),
    norepo=True)
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # -o: complete option names (short and long) for the command,
        # always including the global options
        tables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            tables.append(entry[1])
        flags = []
        for tbl in tables:
            for opt in tbl:
                if "(DEPRECATED)" in opt[3]:
                    continue
                if opt[0]:
                    flags.append('-%s' % opt[0])
                flags.append('--%s' % opt[1])
        ui.write("%s\n" % "\n".join(flags))
        return

    # default: complete command names matching the given prefix
    cmdlist, unused_allcmds = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
2229 2230
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # explicit revlog index: emit its DAG, labeling requested revs
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
        revs = set((int(r) for r in revs))
        def events():
            # yields ('n', (rev, parents)) nodes and ('l', (rev, label))
            # label events consumed by dagparser.dagtextlines below
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # no index file: walk the repository changelog instead
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            # additionally yields ('a', branchname) annotations whenever
            # the named branch changes (with -b)
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
2291 2292
@command('debugdata', debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    # with -c/-m/--dir the positional FILE argument is really the revision
    if any(opts.get(k) for k in ('changelog', 'manifest', 'dir')):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    rlog = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(rlog.revision(rlog.lookup(rev)))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
2306 2307
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'),
    norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e also tries the extended date formats when parsing
    parsed = (util.parsedate(date, util.extendeddateformats)
              if opts["extended"] else util.parsedate(date))
    ui.write(("internal: %s %s\n") % parsed)
    ui.write(("standard: %s\n") % util.datestr(parsed))
    if range:
        matcher = util.matchdate(range)
        ui.write(("match: %s\n") % matcher(parsed[0]))
2322 2323
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ] + remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
                                      opts.get('branch'))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(localheads, remoteheads, remote=remote):
        # Run one discovery exchange and report the common heads found.
        # 'localheads'/'remoteheads' are only consulted by the replay path
        # below; the discovery algorithms talk to 'remote' directly.
        if opts.get('old'):
            if localheads:
                raise error.Abort('cannot use localheads with old style '
                                  'discovery')
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                # reduce the common set to its heads, like new-style
                # discovery would report it
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
            common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    serverlogs = opts.get('serverlog')
    if serverlogs:
        # replay discovery runs recorded in server log files; each line is
        # ';'-separated, field 1 is the operation code
        for filename in serverlogs:
            with open(filename, 'r') as logfile:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        # unbundle logs record heads in the opposite order
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
    else:
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)
2388 2389
@command('debugextensions', formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
    '''show information about active extensions'''
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        extsource = extmod.__file__
        # 'testedwith' is a whitespace-separated list of hg versions the
        # extension author declared compatibility with
        exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            # default verbosity: annotate the name with a compatibility hint
            fm.write('name', '%s', extname)
            if not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            elif exttestedwith == ['internal'] or hgver in exttestedwith:
                fm.plain('\n')
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        # the remaining fields are only shown with --verbose
        fm.condwrite(ui.verbose and extsource, 'source',
                     _('  location: %s\n'), extsource or "")

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()
2425 2426
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
    # with --verbose, show the parse tree before listing matches
    if ui.verbose:
        ui.note(fileset.prettyformat(fileset.parse(expr)), "\n")

    for name in ctx.getfileset(expr):
        ui.write("%s\n" % name)
2438 2439
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    def yesno(flag):
        # render a boolean probe result as the user-facing yes/no string
        return 'yes' if flag else 'no'
    # scratch file used by the case-sensitivity probe below
    util.writefile('.debugfsinfo', '')
    ui.write(('exec: %s\n') % yesno(util.checkexec(path)))
    ui.write(('symlink: %s\n') % yesno(util.checklink(path)))
    ui.write(('hardlink: %s\n') % yesno(util.checknlink(path)))
    ui.write(('case-sensitive: %s\n') % yesno(util.checkcase('.debugfsinfo')))
    os.unlink('.debugfsinfo')
2449 2450
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    # TODO: get desired bundlecaps from command line.
    kwargs = {'bundlecaps': None}
    if common:
        kwargs['common'] = [bin(s) for s in common]
    if head:
        kwargs['heads'] = [bin(s) for s in head]
    chunks = peer.getbundle('debug', **kwargs)

    # map the user-facing compression name to the on-disk bundle type
    typename = {'none': 'HG10UN',
                'bzip2': 'HG10BZ',
                'gzip': 'HG10GZ',
                'bundle2': 'HG20'}.get(opts.get('type', 'bzip2').lower())
    if typename not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, chunks, bundlepath, typename)
2483 2484
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    # the dirstate's matcher carries the combined pattern and, per file,
    # the originating rule via _ignorefileandline()
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        includepat = getattr(ignore, 'includepat', None)
        if includepat is not None:
            ui.write("%s\n" % includepat)
        else:
            raise error.Abort(_("no ignore patterns found"))
    else:
        for f in files:
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    # file matched directly
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # otherwise one of its parent directories may be ignored
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % f)
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (f, ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % f)
2528 2529
@command('debugindex', debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # with generaldelta the stored base is the delta parent; otherwise it
    # is the start of the delta chain
    generaldelta = r.version & revlog.REVLOGGENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = '  base'

    # full hashes in debug mode, short ones otherwise
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        ui.write(("   rev    offset  length " + basehdr + " linkrev"
                 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        ui.write(("   rev flag   offset   length"
                 "     size " + basehdr + "   link     p1     p2"
                 " %s\n") % "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # fall back to null parents if the lookup fails
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], shortfn(node)))
2584 2585
@command('debugindexdot', debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    rlog = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for rev in rlog:
        # emit one edge per non-null parent
        parents = rlog.parents(rlog.node(rev))
        ui.write("\t%d -> %d\n" % (rlog.rev(parents[0]), rev))
        if parents[1] != nullid:
            ui.write("\t%d -> %d\n" % (rlog.rev(parents[1]), rev))
    ui.write("}\n")
2598 2599
@command('debugdeltachain',
    debugrevlogopts + formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``:       revision number
    :``chainid``:   delta chain identifier (numbered by unique base)
    :``chainlen``:  delta chain length to this revision
    :``prevrev``:   previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``:  compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                    (new delta chains typically start at ratio 2.00)
    :``lindist``:   linear distance from base revision in delta chain to end
                    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                    how much unrelated data is needed to load this delta chain
    """
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    generaldelta = r.version & revlog.REVLOGGENERALDELTA

    def revinfo(rev):
        # index entry fields used below: e[1] compressed size,
        # e[2] uncompressed size, e[3] base/delta-parent rev,
        # e[5]/e[6] parent revs
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # classify which revision the delta was computed against
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta, deltas are always against the previous
            # revision unless this revision starts a new chain
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        # number chains consecutively by first-seen base revision
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = r.start(chainbase)
        revstart = r.start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # chain of length one: no previous revision
            prevrev = -1

        chainratio = float(chainsize) / float(uncomp)
        extraratio = float(extradist) / float(chainsize)

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)

    fm.end()
2699 2700
@command('debuginstall', [] + formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''

    def writetemp(contents):
        # write 'contents' to a fresh temp file and return its path
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name

    # running count of detected problems; also the command's return value
    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        encoding.fromlocal("test")
    except error.Abort as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             sys.executable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%s.%s.%s" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(os.__file__))

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(__file__))

    err = None
    try:
        # importing the C extension modules is itself the test
        from . import (
            base85,
            bdiff,
            mpatch,
            osutil,
        )
        dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
    except Exception as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'extensionserror', " %s\n", err)

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = inst
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
    # only the command word of the editor setting is resolved against PATH
    cmdpath = util.findexe(shlex.split(editor)[0])
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editor)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%s problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
2830 2831
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    flags = peer.known([bin(s) for s in ids])
    # one digit per queried id, in input order
    ui.write("%s\n" % "".join('1' if known else '0' for known in flags))
2843 2844
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # thin alias kept only for old completion scripts; all the work happens
    # in debugnamecomplete
    debugnamecomplete(ui, repo, *args)
2848 2849
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # render the null hash as the literal string 'null'
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        # dump the raw records of one merge-state format version;
        # records are (rtype, record) pairs, record fields are
        # '\0'-separated within the record string
        ui.write(('* version %s records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # per-file merge records (file / driver-resolved / change-delete)
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    # v1 lacks the 'other' node; field 7 is the flags instead
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # extra key/value pairs attached to one file
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # merge labels: local, other, and optionally base
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # known record types first, in 'order'; the rest sorted by content
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)
2947 2948
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    allnames = set()
    # branches are handled separately below because only open branches
    # should be offered as completions
    for nsname, ns in repo.names.iteritems():
        if nsname != 'branches':
            allnames.update(ns.listnames(repo))
    allnames.update(tag for (tag, heads, tip, closed)
                    in repo.branchmap().iterbranches() if not closed)
    # no arguments means "complete the empty prefix", i.e. everything
    prefixes = args or ['']
    completions = set()
    for prefix in prefixes:
        completions.update(n for n in allnames if n.startswith(prefix))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')
2967 2968
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Returns 0 if no locks are held.

    """

    if opts.get('force_lock'):
        repo.svfs.unlink('lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink('wlock')
    # bug fix: this used to test 'force_lock' twice, so 'hg debuglocks -W'
    # alone fell through to the reporting code below instead of returning
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    now = time.time()
    held = 0

    def report(vfs, name, method):
        """Print the state of one lock; return 1 if it is held, else 0."""
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # we could take the lock, so it was free; release it right away
            l.release()
        else:
            # lock is held by someone else: describe the holder from the
            # lock file's metadata ("host:pid" contents, mtime for age)
            try:
                stat = vfs.lstat(name)
                age = now - stat.st_mtime
                user = util.username(stat.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                # the lock vanished between the probe and the stat: treat
                # it as free; any other error is real and propagates
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
3039 3040
@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + commitopts2 + formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    def parsenodeid(s):
        # Parse a full-length hex node id, aborting on anything else.
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    if opts.get('delete'):
        # --delete: remove markers by index and return early
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            # message fix: was misspelled 'obsolescense'
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # create one marker: PRECURSOR is obsoleted by SUCCESSORS
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = util.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    # parents can only be recorded for changesets we have
                    if prec not in repo.unfiltered():
                        # message fix: was ungrammatical 'cannot used'
                        raise error.Abort('cannot use --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') % exc)
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # display markers, optionally restricted to those relevant to --rev
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsolete.getmarkers(repo, nodes=nodes))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsolete.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsolete.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
3150 3151
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Return (files, dirs) completion candidates for 'path', keeping
        # only dirstate entries whose state character is in 'acceptable'.
        dirstate = repo.dirstate
        # normalize to an absolute path and refuse anything that escapes
        # the repository root
        spec = os.path.normpath(os.path.join(os.getcwd(), path))
        rootdir = repo.root + os.sep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # dirstate keys always use '/' separators; translate the spec on
        # platforms where os.sep differs, and translate results back below
        fixpaths = os.sep != '/'
        if fixpaths:
            spec = spec.replace(os.sep, '/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        # bind the add methods once; this loop visits the whole dirstate
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', os.sep)
                if fullpaths:
                    addfile(f)
                    continue
                # without --full, complete only up to the next separator:
                # a match past a separator yields a directory candidate
                s = f.find(os.sep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # accumulate acceptable dirstate state characters from the options;
    # an empty string means "no filter" and falls back to 'nmar' below
    acceptable = ''
    if opts['normal']:
        acceptable += 'nm'
    if opts['added']:
        acceptable += 'a'
    if opts['removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
3215 3216
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    peer = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # listing mode: dump every key/value pair in the namespace,
        # escaping non-printable characters
        for name, value in sorted(peer.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (name.encode('string-escape'),
                                   value.encode('string-escape')))
    else:
        # update mode: conditionally move KEY from OLD to NEW; exit
        # status reflects whether the remote accepted the change
        key, old, new = keyinfo
        ok = peer.pushkey(namespace, key, old, new)
        ui.status(str(ok) + '\n')
        return not ok
3236 3237
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """compare the pvecs of two revisions

    Prints both pvecs, their depths, and the relation between them:
    '=' equal, '>'/'<' ancestor ordering, '|' unrelated, or '?' when no
    relation could be established.
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    # Start with '?' so 'rel' is always bound: previously, if none of the
    # four comparisons below matched, the final ui.write() raised
    # NameError instead of reporting the (lack of) relation.
    rel = "?"
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
3257 3258
@command('debugrebuilddirstate|debugrebuildstate',
         [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
          ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                                  'the working copy parent')),
         ],
         _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            # rebuild only entries where manifest and dirstate disagree
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            # dirstate-only files in state 'a' (added) are deliberately kept
            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
            changedfiles = manifestonly | dsnotadded

        # changedfiles stays None without --minimal; presumably that tells
        # rebuild() to reset every entry — confirm against dirstate.rebuild
        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
3295 3296
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # thin wrapper: the rebuild logic lives in repair.rebuildfncache()
    repair.rebuildfncache(ui, repo)
3300 3301
@command('debugrename',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        relpath = matcher.rel(path)
        # renamed() yields (source path, source filenode) or a false value
        source = fctx.filelog().renamed(fctx.filenode())
        if not source:
            ui.write(_("%s not renamed\n") % relpath)
        else:
            ui.write(_("%s renamed from %s:%s\n")
                     % (relpath, source[0], hex(source[1])))
3317 3318
@command('debugrevlog', debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    # --dump mode: print one raw table row per revision and return
    if opts.get("dump"):
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 marks a full snapshot; treat the rev as its own base
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # maintain the set of current head revisions incrementally:
            # parents stop being heads once a child appears
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                # cumulative raw size over cumulative stored size so far
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # decode the revlog version word into a format number and flag names
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.REVLOGNGINLINEDATA:
        flags.append('inline')
    if v & revlog.REVLOGGENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    # counters accumulated over all revisions
    nummerges = 0
    numfull = 0       # revisions stored as full snapshots
    numprev = 0       # deltas whose base is the previous revision
    nump1 = 0
    nump2 = 0
    numother = 0
    nump1prev = 0
    nump2prev = 0
    chainlengths = []

    # each triple is [min, max, total]; total is converted to an average
    # in place further down
    datasize = [None, 0, 0L]
    fullsize = [None, 0, 0L]
    deltasize = [None, 0, 0L]

    def addsize(size, l):
        # fold 'size' into the [min, max, total] triple 'l'
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # stored as a full snapshot: chain starts over
            chainlengths.append(0)
            numfull += 1
            addsize(size, fullsize)
        else:
            # stored as a delta: extend the base's chain and classify
            # which revision the delta is computed against
            chainlengths.append(chainlengths[delta] + 1)
            addsize(size, deltasize)
            if delta == rev - 1:
                numprev += 1
                if delta == p1:
                    nump1prev += 1
                elif delta == p2:
                    nump2prev += 1
            elif delta == p1:
                nump1 += 1
            elif delta == p2:
                nump2 += 1
            elif delta != nullrev:
                numother += 1

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # convert totals to averages in place (integer division, Python 2)
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    deltatotal = deltasize[2]
    if numrevs - numfull > 0:
        deltasize[2] /= numrevs - numfull
    totalsize = fulltotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # right-aligned integer column wide enough for 'max'
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        # like dfmtstr, followed by a padded percentage field
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # (value, percentage-of-total); reports 100% when total is zero
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('compression ratio : ') + fmt % compratio)

    # per-revision raw sizes are only tracked for revlog format > 0
    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        # p1/p2 delta bases only occur with generaldelta
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
3503 3504
@command('debugrevspec',
    [('', 'optimize', None, _('print parsed tree after optimizing'))],
    _('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use --verbose to print the parsed tree before and after aliases
    expansion.
    """
    # NOTE: the synopsis above is now wrapped in _() for translation,
    # matching every other @command registration in this module.
    if ui.verbose:
        # show each transformation stage of the parse tree, but only the
        # stages that actually changed the tree
        tree = revset.parse(expr, lookup=repo.__contains__)
        ui.note(revset.prettyformat(tree), "\n")
        newtree = revset.expandaliases(ui, tree)
        if newtree != tree:
            ui.note(("* expanded:\n"), revset.prettyformat(newtree), "\n")
            tree = newtree
        newtree = revset.foldconcat(tree)
        if newtree != tree:
            ui.note(("* concatenated:\n"), revset.prettyformat(newtree), "\n")
        if opts["optimize"]:
            optimizedtree = revset.optimize(newtree)
            ui.note(("* optimized:\n"),
                    revset.prettyformat(optimizedtree), "\n")
    # evaluate the expression independently of the debug output above
    func = revset.match(ui, expr, repo)
    revs = func(repo)
    if ui.verbose:
        ui.note(("* set:\n"), revset.prettyformatset(revs), "\n")
    for c in revs:
        ui.write("%s\n" % c)
3533 3534
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """

    # resolve both revisions before taking the lock; a missing second
    # revision defaults to the null revision
    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, 'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)
3551 3552
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = opts.get('nodates')
    datesort = opts.get('datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # each dirstate entry 'ent' is indexed as: [0] state char, [1] mode,
    # [2] size, [3] mtime (see the uses below)
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            # -1 means the saved timestamp has been invalidated
            timestr = 'unset '
        elif nodates:
            timestr = 'set '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
        if ent[1] & 0o20000:
            # S_IFLNK bit set: the entry is a symlink
            mode = 'lnk'
        else:
            # show permission bits with the process umask applied
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
3582 3583
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # dump the subrepo state recorded in the given (or working) revision
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
3593 3594
@command('debugsuccessorssets',
    [],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = str
    node2str = short
    if ui.debug():
        # in debug mode print full hex hashes instead of the short forms
        def ctx2str(ctx):
            return ctx.hex()
        node2str = hex
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n'% ctx2str(ctx))
        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
            # one indented line per successors set; an empty set (pruned
            # changeset) still produces a blank line
            if succsset:
                ui.write(' ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(' ')
                    ui.write(node2str(node))
            ui.write('\n')
3647 3648
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
    ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        # the command is optionalrepo, but --rev needs an actual repository
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts['rev'])

    # parse -D KEY=VALUE definitions into extra template properties
    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            if not k:
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        # show the parse tree, and the alias-expanded tree if it differs
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    mapfile = None
    if revs is None:
        # generic template: render once using only the -D properties
        k = 'debugtemplate'
        t = formatter.maketemplater(ui, k, tmpl)
        ui.write(templater.stringify(t(k, **props)))
    else:
        # log template: render once per selected changeset
        displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
                                                mapfile, buffered=False)
        for r in revs:
            displayer.show(repo[r], **props)
        displayer.close()
3698 3699
@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'), inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    m = scmutil.match(repo[None], pats, opts)
    walked = list(repo.walk(m))
    if not walked:
        return
    # honor ui.slash on platforms whose native separator is not '/'
    normalize = lambda fn: fn
    if ui.configbool('ui', 'slash') and os.sep != '/':
        normalize = lambda fn: util.normpath(fn)
    # size the two path columns to their widest entries
    widest_abs = max(len(p) for p in walked)
    widest_rel = max(len(m.rel(p)) for p in walked)
    fmt = 'f %%-%ds %%-%ds %%s' % (widest_abs, widest_rel)
    for p in walked:
        flag = m.exact(p) and 'exact' or ''
        line = fmt % (p, normalize(m.rel(p)), flag)
        ui.write("%s\n" % line.rstrip())
3715 3716
@command('debugwireargs',
    [('', 'three', '', 'three'),
    ('', 'four', '', 'four'),
    ('', 'five', '', 'five'),
    ] + remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    # exercise argument passing through the wire protocol
    peer = hg.peer(ui, opts, repopath)
    # drop the generic remote options so only the test options remain
    for remoteopt in remoteopts:
        del opts[remoteopt[1]]
    # forward only the options that were actually set
    args = dict((k, v) for k, v in opts.iteritems() if v)
    # run twice to check that we don't mess up the stream for the next command
    res1 = peer.debugwireargs(*vals, **args)
    res2 = peer.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
3737 3738
@command('^diff',
    [('r', 'rev', [], _('revision'), _('REV')),
    ('c', 'change', '', _('change made by revision'), _('REV'))
    ] + diffopts + diffopts2 + walkopts + subrepoopts,
    _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'),
    inferrepo=True)
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    .. note::

       :hg:`diff` may generate unexpected results for merges, as it will
       default to comparing against the working directory's first
       parent changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its first parent.

    Alternatively you can specify -c/--change with a revision to see
    the changes in that changeset relative to its first parent.

    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read :hg:`help diffs`.

    .. container:: verbose

      Examples:

      - compare a file in the current working directory to its parent::

          hg diff foo.c

      - compare two historical versions of a directory, with rename info::

          hg diff --git -r 1.0:1.2 lib/

      - get change stats relative to the last change on some date::

          hg diff --stat -r "date('may 2')"

      - diff all newly-added files that contain a keyword::

          hg diff "set:added() and grep(GNU)"

      - compare a revision and its parents::

          hg diff -c 9353 # compare against first parent
          hg diff -r 9353^:9353 # same using revset syntax
          hg diff -r 9353^2:9353 # compare against the second parent

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')
    stat = opts.get('stat')
    reverse = opts.get('reverse')

    # --rev and --change are mutually exclusive ways to pick the endpoints
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise error.Abort(msg)
    elif change:
        # --change REV: diff REV against its first parent
        node2 = scmutil.revsingle(repo, change, None).node()
        node1 = repo[node2].p1().node()
    else:
        node1, node2 = scmutil.revpair(repo, revs)

    if reverse:
        node1, node2 = node2, node1

    diffopts = patch.diffallopts(ui, opts)
    # files are matched against the second (newer) revision
    m = scmutil.match(repo[node2], pats, opts)
    cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
                           listsubrepos=opts.get('subrepos'),
                           root=opts.get('root'))
3824 3825
@command('^export',
    [('o', 'output', '',
      _('print output to file with formatted name'), _('FORMAT')),
    ('', 'switch-parent', None, _('diff against the second parent')),
    ('r', 'rev', [], _('revisions to export'), _('REV')),
    ] + diffopts,
    _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'))
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.
    If no revision is given, the parent of the working directory is used.

    The information shown in the changeset header is: author, date,
    branch name (if non-default), changeset hash, parent(s) and commit
    comment.

    .. note::

       :hg:`export` may generate unexpected diff output for merge
       changesets, as it will compare the merge changeset against its
       first parent only.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    :``%%``: literal "%" character
    :``%H``: changeset hash (40 hexadecimal digits)
    :``%N``: number of patches being generated
    :``%R``: changeset revision number
    :``%b``: basename of the exporting repository
    :``%h``: short-form changeset hash (12 hexadecimal digits)
    :``%m``: first line of the commit message (only alphanumeric characters)
    :``%n``: zero-padded sequence number, starting at 1
    :``%r``: zero-padded changeset revision number

    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. With -a, export will generate a
    diff anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. See :hg:`help diffs` for more information.

    With the --switch-parent option, the diff will be against the
    second parent. It can be useful to review a merge.

    .. container:: verbose

      Examples:

      - use export and import to transplant a bugfix to the current
        branch::

          hg export -r 9353 | hg import -

      - export all the changesets between two revisions to a file with
        rename information::

          hg export --git -r 123:150 > changes.txt

      - split outgoing changes into a series of patches with
        descriptive names::

          hg export -r "outgoing()" -o "%n-%m.patch"

    Returns 0 on success.
    """
    # positional arguments and -r values are combined into one list
    changesets += tuple(opts.get('rev', []))
    if not changesets:
        # default to the working directory's parent
        changesets = ['.']
    revs = scmutil.revrange(repo, changesets)
    if not revs:
        raise error.Abort(_("export requires at least one changeset"))
    if len(revs) > 1:
        ui.note(_('exporting patches:\n'))
    else:
        ui.note(_('exporting patch:\n'))
    cmdutil.export(repo, revs, template=opts.get('output'),
                   switch_parent=opts.get('switch_parent'),
                   opts=patch.diffallopts(ui, opts))
3905 3906
@command('files',
    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ] + walkopts + formatteropts + subrepoopts,
    _('[OPTION]... [PATTERN]...'))
def files(ui, repo, *pats, **opts):
    """list tracked files

    Print files under Mercurial control in the working directory or
    specified revision whose names match the given patterns (excluding
    removed files).

    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.

    .. container:: verbose

      Examples:

      - list all files under the current directory::

          hg files .

      - shows sizes and flags for current revision::

          hg files -vr .

      - list all files named README::

          hg files -I "**/README"

      - list all binary files::

          hg files "set:binary()"

      - find files containing a regular expression::

          hg files "set:grep('bob')"

      - search tracked file contents with xargs and grep::

          hg files -0 | xargs -0 grep foo

    See :hg:`help patterns` and :hg:`help filesets` for more information
    on specifying file patterns.

    Returns 0 if a match is found, 1 otherwise.

    """
    # with no --rev, list the working directory (None context)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    end = '\n'
    if opts.get('print0'):
        # -0: NUL-terminate names for safe consumption by 'xargs -0'
        end = '\0'
    fm = ui.formatter('files', opts)
    fmt = '%s' + end

    m = scmutil.match(ctx, pats, opts)
    # cmdutil.files does the walking and printing; it returns the exit code
    ret = cmdutil.files(ui, ctx, m, fm, fmt, opts.get('subrepos'))

    fm.end()

    return ret
3969 3970
@command('^forget', walkopts, _('[OPTION]... FILE...'), inferrepo=True)
def forget(ui, repo, *pats, **opts):
    """forget the specified files on the next commit

    Mark the specified files so they will no longer be tracked
    after the next commit.

    This only removes files from the current branch, not from the
    entire project history, and it does not delete them from the
    working directory.

    To delete the file from the working directory, see :hg:`remove`.

    To undo a forget before the next commit, see :hg:`add`.

    .. container:: verbose

      Examples:

      - forget newly-added binary files::

          hg forget "set:added() and binary()"

      - forget files that would be excluded by .hgignore::

          hg forget "set:hgignore()"

    Returns 0 on success.
    """

    if not pats:
        raise error.Abort(_('no files specified'))

    # match against the working directory context
    m = scmutil.match(repo[None], pats, opts)
    # cmdutil.forget returns (rejected, forgotten); exit 1 if anything
    # could not be forgotten
    rejected = cmdutil.forget(ui, repo, m, prefix="", explicitonly=False)[0]
    return rejected and 1 or 0
4006 4007
@command(
    'graft',
    [('r', 'rev', [], _('revisions to graft'), _('REV')),
     ('c', 'continue', False, _('resume interrupted graft')),
     ('e', 'edit', False, _('invoke editor on commit messages')),
     ('', 'log', None, _('append graft info to log message')),
     ('f', 'force', False, _('force graft')),
     ('D', 'currentdate', False,
      _('record the current date as commit date')),
     # NOTE: -U is a boolean flag; the previous stray _('DATE') metavar on
     # this tuple made the help output render as "-U DATE", which is wrong.
     ('U', 'currentuser', False,
      _('record the current user as committer'))]
    + commitopts2 + mergetoolopts + dryrunopts,
    _('[OPTION]... [-r REV]... REV...'))
def graft(ui, repo, *revs, **opts):
    '''copy changes from other branches onto the current branch

    This command uses Mercurial's merge logic to copy individual
    changes from other branches without merging branches in the
    history graph. This is sometimes known as 'backporting' or
    'cherry-picking'. By default, graft will copy user, date, and
    description from the source changesets.

    Changesets that are ancestors of the current revision, that have
    already been grafted, or that are merges will be skipped.

    If --log is specified, log messages will have a comment appended
    of the form::

      (grafted from CHANGESETHASH)

    If --force is specified, revisions will be grafted even if they
    are already ancestors of or have been grafted to the destination.
    This is useful when the revisions have since been backed out.

    If a graft merge results in conflicts, the graft process is
    interrupted so that the current merge can be manually resolved.
    Once all conflicts are addressed, the graft process can be
    continued with the -c/--continue option.

    .. note::

       The -c/--continue option does not reapply earlier options, except
       for --force.

    .. container:: verbose

      Examples:

      - copy a single change to the stable branch and edit its description::

          hg update stable
          hg graft --edit 9393

      - graft a range of changesets with one exception, updating dates::

          hg graft -D "2085::2093 and not 2091"

      - continue a graft after resolving conflicts::

          hg graft -c

      - show the source of a grafted changeset::

          hg log --debug -r .

      - show revisions sorted by date::

          hg log -r "sort(all(), date)"

    See :hg:`help revisions` and :hg:`help revsets` for more about
    specifying revisions.

    Returns 0 on successful completion.
    '''
    # Take the working-directory lock once here; the real work happens in
    # _dograft so the lock scope stays obvious.
    with repo.wlock():
        return _dograft(ui, repo, *revs, **opts)
4083 4084
def _dograft(ui, repo, *revs, **opts):
    """Implementation of :hg:`graft`; caller must hold the wlock.

    Returns 0 on success and -1 when every requested revision was
    skipped.  Raises error.Abort on merge conflicts after writing the
    remaining work to 'graftstate' so --continue can resume.
    """
    if revs and opts.get('rev'):
        ui.warn(_('warning: inconsistent use of --rev might give unexpected '
                  'revision ordering!\n'))

    # positional REVs and -r/--rev values are simply concatenated
    revs = list(revs)
    revs.extend(opts.get('rev'))

    if not opts.get('user') and opts.get('currentuser'):
        opts['user'] = ui.username()
    if not opts.get('date') and opts.get('currentdate'):
        opts['date'] = "%d %d" % util.makedate()

    editor = cmdutil.getcommiteditor(editform='graft', **opts)

    cont = False
    if opts.get('continue'):
        cont = True
        if revs:
            raise error.Abort(_("can't specify --continue and revisions"))
        # read in unfinished revisions
        try:
            nodes = repo.vfs.read('graftstate').splitlines()
            revs = [repo[node].rev() for node in nodes]
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # no graftstate file: nothing to continue
            cmdutil.wrongtooltocontinue(repo, _('graft'))
    else:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)
        if not revs:
            raise error.Abort(_('no revisions specified'))
        revs = scmutil.revrange(repo, revs)

    skipped = set()
    # check for merges
    for rev in repo.revs('%ld and merge()', revs):
        ui.warn(_('skipping ungraftable merge revision %s\n') % rev)
        skipped.add(rev)
    revs = [r for r in revs if r not in skipped]
    if not revs:
        return -1

    # Don't check in the --continue case, in effect retaining --force across
    # --continues. That's because without --force, any revisions we decided to
    # skip would have been filtered out here, so they wouldn't have made their
    # way to the graftstate. With --force, any revisions we would have otherwise
    # skipped would not have been filtered out, and if they hadn't been applied
    # already, they'd have been in the graftstate.
    if not (cont or opts.get('force')):
        # check for ancestors of dest branch
        crev = repo['.'].rev()
        ancestors = repo.changelog.ancestors([crev], inclusive=True)
        # Cannot use x.remove(y) on smart set, this has to be a list.
        # XXX make this lazy in the future
        revs = list(revs)
        # don't mutate while iterating, create a copy
        for rev in list(revs):
            if rev in ancestors:
                ui.warn(_('skipping ancestor revision %d:%s\n') %
                        (rev, repo[rev]))
                # XXX remove on list is slow
                revs.remove(rev)
        if not revs:
            return -1

        # analyze revs for earlier grafts
        # ids maps both changeset hashes and their recorded graft 'source'
        # extras to the local rev that would (re-)introduce them
        ids = {}
        for ctx in repo.set("%ld", revs):
            ids[ctx.hex()] = ctx.rev()
            n = ctx.extra().get('source')
            if n:
                ids[n] = ctx.rev()

        # check ancestors for earlier grafts
        ui.debug('scanning for duplicate grafts\n')

        for rev in repo.changelog.findmissingrevs(revs, [crev]):
            ctx = repo[rev]
            n = ctx.extra().get('source')
            if n in ids:
                try:
                    r = repo[n].rev()
                except error.RepoLookupError:
                    # graft source is not known locally
                    r = None
                if r in revs:
                    ui.warn(_('skipping revision %d:%s '
                              '(already grafted to %d:%s)\n')
                            % (r, repo[r], rev, ctx))
                    revs.remove(r)
                elif ids[n] in revs:
                    if r is None:
                        ui.warn(_('skipping already grafted revision %d:%s '
                                  '(%d:%s also has unknown origin %s)\n')
                                % (ids[n], repo[ids[n]], rev, ctx, n[:12]))
                    else:
                        ui.warn(_('skipping already grafted revision %d:%s '
                                  '(%d:%s also has origin %d:%s)\n')
                                % (ids[n], repo[ids[n]], rev, ctx, r, n[:12]))
                    revs.remove(ids[n])
            elif ctx.hex() in ids:
                r = ids[ctx.hex()]
                ui.warn(_('skipping already grafted revision %d:%s '
                          '(was grafted from %d:%s)\n') %
                        (r, repo[r], rev, ctx))
                revs.remove(r)
        if not revs:
            return -1

    for pos, ctx in enumerate(repo.set("%ld", revs)):
        desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                               ctx.description().split('\n', 1)[0])
        names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
        if names:
            desc += ' (%s)' % ' '.join(names)
        ui.status(_('grafting %s\n') % desc)
        if opts.get('dry_run'):
            continue

        # record graft provenance in the new changeset's extras
        source = ctx.extra().get('source')
        extra = {}
        if source:
            extra['source'] = source
            extra['intermediate-source'] = ctx.hex()
        else:
            extra['source'] = ctx.hex()
        user = ctx.user()
        if opts.get('user'):
            user = opts['user']
        date = ctx.date()
        if opts.get('date'):
            date = opts['date']
        message = ctx.description()
        if opts.get('log'):
            message += '\n(grafted from %s)' % ctx.hex()

        # we don't merge the first commit when continuing
        if not cont:
            # perform the graft merge with p1(rev) as 'ancestor'
            try:
                # ui.forcemerge is an internal variable, do not document
                repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                  'graft')
                stats = mergemod.graft(repo, ctx, ctx.p1(),
                                       ['local', 'graft'])
            finally:
                repo.ui.setconfig('ui', 'forcemerge', '', 'graft')
            # report any conflicts
            if stats and stats[3] > 0:
                # write out state for --continue
                nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]]
                repo.vfs.write('graftstate', ''.join(nodelines))
                # NOTE(review): 'extra' (the commit-extras dict above) is
                # rebound to a string here; harmless only because we abort
                # immediately on this path.
                extra = ''
                if opts.get('user'):
                    extra += ' --user %s' % util.shellquote(opts['user'])
                if opts.get('date'):
                    extra += ' --date %s' % util.shellquote(opts['date'])
                if opts.get('log'):
                    extra += ' --log'
                hint=_("use 'hg resolve' and 'hg graft --continue%s'") % extra
                raise error.Abort(
                    _("unresolved conflicts, can't continue"),
                    hint=hint)
        else:
            cont = False

        # commit
        node = repo.commit(text=message, user=user,
                           date=date, extra=extra, editor=editor)
        if node is None:
            ui.warn(
                _('note: graft of %d:%s created no changes to commit\n') %
                (ctx.rev(), ctx))

    # remove state when we complete successfully
    if not opts.get('dry_run'):
        util.unlinkpath(repo.join('graftstate'), ignoremissing=True)

    return 0
4264 4265
@command('grep',
    [('0', 'print0', None, _('end fields with NUL')),
    ('', 'all', None, _('print all revisions that match')),
    ('a', 'text', None, _('treat all files as text')),
    ('f', 'follow', None,
     _('follow changeset history,'
       ' or file history across copies and renames')),
    ('i', 'ignore-case', None, _('ignore case when matching')),
    ('l', 'files-with-matches', None,
     _('print only filenames and revisions that match')),
    ('n', 'line-number', None, _('print matching line numbers')),
    ('r', 'rev', [],
     _('only search files changed within revision range'), _('REV')),
    ('u', 'user', None, _('list the author (long with -v)')),
    ('d', 'date', None, _('list the date (short with -q)')),
    ] + walkopts,
    _('[OPTION]... PATTERN [FILE]...'),
    inferrepo=True)
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which a
    match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.

    Returns 0 if a match is found, 1 otherwise.
    """
    # multiline mode so ^/$ anchor per line; -i adds case folding
    reflags = re.M
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = util.re.compile(pattern, reflags)
    except re.error as inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return 1
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'

    # memoize filelog lookups; the same files recur across revisions
    getfile = util.lrucachefunc(repo.file)

    def matchlines(body):
        # Yield (linenum, colstart, colend, line) for every line of
        # 'body' containing at least one match.
        begin = 0
        linenum = 0
        while begin < len(body):
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body) + 1
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]

    class linestate(object):
        # One matched line; equality/hash ignore column info so that
        # difflinestates() compares revisions by line content only.
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend

        def __hash__(self):
            return hash((self.linenum, self.line))

        def __eq__(self, other):
            return self.line == other.line

        def __iter__(self):
            # Yield (text, label) fragments, labeling every match on the
            # line 'grep.match' for colorized output.
            yield (self.line[:self.colstart], '')
            yield (self.line[self.colstart:self.colend], 'grep.match')
            rest = self.line[self.colend:]
            while rest != '':
                match = regexp.search(rest)
                if not match:
                    yield (rest, '')
                    break
                mstart, mend = match.span()
                yield (rest[:mstart], '')
                yield (rest[mstart:mend], 'grep.match')
                rest = rest[mend:]

    # matches: rev -> filename -> [linestate]; copies: rev -> fn -> source
    matches = {}
    copies = {}
    def grepbody(fn, rev, body):
        # Record every matching line of fn@rev into 'matches'.
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)

    def difflinestates(a, b):
        # Diff two linestate lists, yielding ('+'/'-', linestate) pairs
        # for lines whose match status changed between revisions.
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])

    def display(fn, ctx, pstates, states):
        # Print the matches for one file at one revision; returns True
        # if anything was printed.
        rev = ctx.rev()
        if ui.quiet:
            datefunc = util.shortdate
        else:
            datefunc = util.datestr
        found = False
        @util.cachefunc
        def binary():
            flog = getfile(fn)
            return util.binary(flog.read(ctx.filenode(fn)))

        if opts.get('all'):
            iter = difflinestates(pstates, states)
        else:
            iter = [('', l) for l in states]
        for change, l in iter:
            cols = [(fn, 'grep.filename'), (str(rev), 'grep.rev')]

            if opts.get('line_number'):
                cols.append((str(l.linenum), 'grep.linenumber'))
            if opts.get('all'):
                cols.append((change, 'grep.change'))
            if opts.get('user'):
                cols.append((ui.shortuser(ctx.user()), 'grep.user'))
            if opts.get('date'):
                cols.append((datefunc(ctx.date()), 'grep.date'))
            for col, label in cols[:-1]:
                ui.write(col, label=label)
                ui.write(sep, label='grep.sep')
            ui.write(cols[-1][0], label=cols[-1][1])
            if not opts.get('files_with_matches'):
                ui.write(sep, label='grep.sep')
                if not opts.get('text') and binary():
                    ui.write(_(" Binary file matches"))
                else:
                    for s, label in l:
                        ui.write(s, label=label)
            ui.write(eol)
            found = True
            if opts.get('files_with_matches'):
                # -l: one output line per file is enough
                break
        return found

    skip = {}
    revfiles = {}
    matchfn = scmutil.match(repo[None], pats, opts)
    found = False
    follow = opts.get('follow')

    def prep(ctx, fns):
        # walkchangerevs() callback: collect matches for every touched
        # file at this revision and at its first parent (for --all diffs).
        rev = ctx.rev()
        pctx = ctx.p1()
        parent = pctx.rev()
        matches.setdefault(rev, {})
        matches.setdefault(parent, {})
        files = revfiles.setdefault(rev, [])
        for fn in fns:
            flog = getfile(fn)
            try:
                fnode = ctx.filenode(fn)
            except error.LookupError:
                continue

            copied = flog.renamed(fnode)
            copy = follow and copied and copied[0]
            if copy:
                copies.setdefault(rev, {})[fn] = copy
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            files.append(fn)

            if fn not in matches[rev]:
                grepbody(fn, rev, flog.read(fnode))

            # follow the rename chain into the parent when applicable
            pfn = copy or fn
            if pfn not in matches[parent]:
                try:
                    fnode = pctx.filenode(pfn)
                    grepbody(pfn, parent, flog.read(fnode))
                except error.LookupError:
                    pass

    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        rev = ctx.rev()
        parent = ctx.p1().rev()
        for fn in sorted(revfiles.get(rev, [])):
            states = matches[rev][fn]
            copy = copies.get(rev, {}).get(fn)
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            pstates = matches.get(parent, {}).get(copy or fn, [])
            if pstates or states:
                r = display(fn, ctx, pstates, states)
                found = found or r
                if r and not opts.get('all'):
                    # without --all, stop reporting this file (and its
                    # rename source) after the first matching revision
                    skip[fn] = True
                    if copy:
                        skip[copy] = True
        # free per-revision state as soon as it has been displayed
        del matches[rev]
        del revfiles[rev]

    return not found
4487 4488
@command('heads',
    [('r', 'rev', '',
     _('show only heads which are descendants of STARTREV'), _('STARTREV')),
    ('t', 'topo', False, _('show topological heads only')),
    ('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
    ('c', 'closed', False, _('show normal and closed branch heads')),
    ] + templateopts,
    _('[-ct] [-r STARTREV] [REV]...'))
def heads(ui, repo, *branchrevs, **opts):
    """show branch heads

    With no arguments, show all open branch heads in the repository.
    Branch heads are changesets that have no descendants on the
    same branch. They are where development generally takes place and
    are the usual targets for update and merge operations.

    If one or more REVs are given, only open branch heads on the
    branches associated with the specified changesets are shown. This
    means that you can use :hg:`heads .` to see the heads on the
    currently checked-out branch.

    If -c/--closed is specified, also show branch heads marked closed
    (see :hg:`commit --close-branch`).

    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.

    If -t/--topo is specified, named branch mechanics will be ignored and only
    topological heads (changesets with no children) will be shown.

    Returns 0 if matching heads are found, 1 if not.
    """

    start = None
    if 'rev' in opts:
        start = scmutil.revsingle(repo, opts['rev'], None).node()

    # Collect candidate heads: purely topological with -t, otherwise the
    # per-named-branch heads.
    if opts.get('topo'):
        headctxs = [repo[h] for h in repo.heads(start)]
    else:
        nodes = []
        for branch in repo.branchmap():
            nodes += repo.branchheads(branch, start, opts.get('closed'))
        headctxs = [repo[h] for h in nodes]

    # Restrict to the branches named via REV arguments.
    if branchrevs:
        wanted = set(repo[br].branch() for br in branchrevs)
        headctxs = [h for h in headctxs if h.branch() in wanted]

    if opts.get('active') and branchrevs:
        dagheads = repo.heads(start)
        headctxs = [h for h in headctxs if h.node() in dagheads]

    # Warn about requested branches that ended up with no heads at all.
    if branchrevs:
        present = set(h.branch() for h in headctxs)
        if wanted - present:
            headless = ', '.join(b for b in wanted - present)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)

    if not headctxs:
        return 1

    displayer = cmdutil.show_changeset(ui, repo, opts)
    for ctx in sorted(headctxs, key=lambda c: c.rev(), reverse=True):
        displayer.show(ctx)
    displayer.close()
4558 4559
@command('help',
    [('e', 'extension', None, _('show only help for extensions')),
    ('c', 'command', None, _('show only help for commands')),
    ('k', 'keyword', None, _('show topics matching keyword')),
    ('s', 'system', [], _('show help for specific platform(s)')),
    ],
    _('[-ecks] [TOPIC]'),
    norepo=True)
def help_(ui, name=None, **opts):
    """show help for a given topic or a help overview

    With no arguments, print a list of commands with short help messages.

    Given a topic, extension, or command name, print help for that
    topic.

    Returns 0 if successful.
    """

    # Wrap at ui.textwidth, clamped to the terminal width.
    textwidth = ui.configint('ui', 'textwidth', 78)
    termwidth = ui.termwidth() - 2
    if textwidth <= 0 or termwidth < textwidth:
        textwidth = termwidth

    # Which platform-conditional help sections to keep.
    keep = opts.get('system') or []
    if not keep:
        if sys.platform.startswith('win'):
            keep.append('windows')
        elif sys.platform == 'OpenVMS':
            keep.append('vms')
        elif sys.platform == 'plan9':
            keep.append('plan9')
        else:
            keep.append('unix')
            keep.append(sys.platform.lower())
    if ui.verbose:
        keep.append('verbose')

    # Parse "topic.section" / "topic.subtopic.section" addressing.
    section = None
    subtopic = None
    if name and '.' in name:
        name, remaining = name.split('.', 1)
        remaining = encoding.lower(remaining)
        if '.' in remaining:
            subtopic, section = remaining.split('.', 1)
        elif name in help.subtopics:
            subtopic = remaining
        else:
            section = remaining

    text = help.help_(ui, name, subtopic=subtopic, **opts)

    formatted, pruned = minirst.format(text, textwidth, keep=keep,
                                       section=section)

    # We could have been given a weird ".foo" section without a name
    # to look for, or we could have simply failed to found "foo.bar"
    # because bar isn't a section of foo
    if section and not (formatted and name):
        raise error.Abort(_("help section not found"))

    # Re-render with an 'omitted'/'notomitted' marker so the output can
    # mention (or deliberately not mention) content hidden by -v pruning.
    keep.append('omitted' if 'verbose' in pruned else 'notomitted')
    formatted, pruned = minirst.format(text, textwidth, keep=keep,
                                       section=section)
    ui.write(formatted)
4628 4629
4629 4630
@command('identify|id',
    [('r', 'rev', '',
     _('identify the specified revision'), _('REV')),
    ('n', 'num', None, _('show local revision number')),
    ('i', 'id', None, _('show global revision id')),
    ('b', 'branch', None, _('show branch')),
    ('t', 'tags', None, _('show tags')),
    ('B', 'bookmarks', None, _('show bookmarks')),
    ] + remoteopts,
    _('[-nibtB] [-r REV] [SOURCE]'),
    optionalrepo=True)
def identify(ui, repo, source=None, rev=None,
             num=None, id=None, branch=None, tags=None, bookmarks=None, **opts):
    """identify the working directory or specified revision

    Print a summary identifying the repository state at REV using one or
    two parent hash identifiers, followed by a "+" if the working
    directory has uncommitted changes, the branch name (if not default),
    a list of tags, and a list of bookmarks.

    When REV is not given, print a summary of the current state of the
    repository.

    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.

    .. container:: verbose

      Examples:

      - generate a build identifier for the working directory::

          hg id --id > build-id.dat

      - find the revision corresponding to a tag::

          hg id -n -r 1.3

      - check the most recent revision of a remote repository::

          hg id -r tip http://selenic.com/hg/

    See :hg:`log` for generating more information about specific revisions,
    including full hash identifiers.

    Returns 0 if successful.
    """

    if not repo and not source:
        raise error.Abort(_("there is no Mercurial repository here "
                            "(.hg not found)"))

    # full hashes with --debug, short ones otherwise
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    # with no explicit field flags, print the default summary
    default = not (num or id or branch or tags or bookmarks)
    output = []
    revs = []

    if source:
        source, branches = hg.parseurl(ui.expandpath(source))
        peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
        # peer.local() is None for a truly remote repository
        repo = peer.local()
        revs, checkout = hg.addbranchrevs(repo, peer, branches, None)

    if not repo:
        # remote repository: only the id and bookmarks can be queried
        if num or branch or tags:
            raise error.Abort(
                _("can't query remote revision number, branch, or tags"))
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"

        remoterev = peer.lookup(rev)
        if default or id:
            output = [hexfunc(remoterev)]

        def getbms():
            # bookmarks on the remote that point at remoterev
            bms = []

            if 'bookmarks' in peer.listkeys('namespaces'):
                hexremoterev = hex(remoterev)
                bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems()
                       if bmr == hexremoterev]

            return sorted(bms)

        if bookmarks:
            output.extend(getbms())
        elif default and not ui.quiet:
            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(getbms())
            if bm:
                output.append(bm)
    else:
        ctx = scmutil.revsingle(repo, rev, None)

        if ctx.rev() is None:
            # working-directory context: identify via its parent(s),
            # appending '+' when there are local modifications
            ctx = repo[None]
            parents = ctx.parents()
            taglist = []
            for p in parents:
                taglist.extend(p.tags())

            changed = ""
            if default or id or num:
                if (any(repo.status())
                    or any(ctx.sub(s).dirty() for s in ctx.substate)):
                    changed = '+'
            if default or id:
                output = ["%s%s" %
                  ('+'.join([hexfunc(p.node()) for p in parents]), changed)]
            if num:
                output.append("%s%s" %
                  ('+'.join([str(p.rev()) for p in parents]), changed))
        else:
            if default or id:
                output = [hexfunc(ctx.node())]
            if num:
                output.append(str(ctx.rev()))
            taglist = ctx.tags()

        if default and not ui.quiet:
            b = ctx.branch()
            if b != 'default':
                output.append("(%s)" % b)

            # multiple tags for a single parent separated by '/'
            t = '/'.join(taglist)
            if t:
                output.append(t)

            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(ctx.bookmarks())
            if bm:
                output.append(bm)
        else:
            if branch:
                output.append(ctx.branch())

            if tags:
                output.extend(taglist)

            if bookmarks:
                output.extend(ctx.bookmarks())

    ui.write("%s\n" % ' '.join(output))
4779 4780
4780 4781 @command('import|patch',
4781 4782 [('p', 'strip', 1,
4782 4783 _('directory strip option for patch. This has the same '
4783 4784 'meaning as the corresponding patch option'), _('NUM')),
4784 4785 ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')),
4785 4786 ('e', 'edit', False, _('invoke editor on commit messages')),
4786 4787 ('f', 'force', None,
4787 4788 _('skip check for outstanding uncommitted changes (DEPRECATED)')),
4788 4789 ('', 'no-commit', None,
4789 4790 _("don't commit, just update the working directory")),
4790 4791 ('', 'bypass', None,
4791 4792 _("apply patch without touching the working directory")),
4792 4793 ('', 'partial', None,
4793 4794 _('commit even if some hunks fail')),
4794 4795 ('', 'exact', None,
4795 4796 _('abort if patch would apply lossily')),
4796 4797 ('', 'prefix', '',
4797 4798 _('apply patch to subdirectory'), _('DIR')),
4798 4799 ('', 'import-branch', None,
4799 4800 _('use any branch information in patch (implied by --exact)'))] +
4800 4801 commitopts + commitopts2 + similarityopts,
4801 4802 _('[OPTION]... PATCH...'))
def import_(ui, repo, patch1=None, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually (unless
    --no-commit is specified).

    To read a patch from standard input, use "-" as the patch name. If
    a URL is specified, the patch will be downloaded from there.

    Import first applies changes to the working directory (unless
    --bypass is specified), import will abort if there are outstanding
    changes.

    Use --bypass to apply and commit patches directly to the
    repository, without affecting the working directory. Without
    --exact, patches will be applied on top of the working directory
    parent revision.

    You can import a patch straight from a mail message. Even patches
    as attachments work (to use the body part, it must have type
    text/plain or text/x-patch). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to the commit
    message.

    If the imported patch was generated by :hg:`export`, user and
    description from patch override values from message headers and
    body. Values given on command line with -m/--message and -u/--user
    override these.

    If --exact is specified, import will set the working directory to
    the parent of each patch before applying it, and will abort if the
    resulting changeset has a different ID than the one recorded in
    the patch. This will guard against various ways that portable
    patch formats and mail systems might fail to transfer Mercurial
    data or metadata. See :hg:`bundle` for lossless transmission.

    Use --partial to ensure a changeset will be created from the patch
    even if some hunks fail to apply. Hunks that fail to apply will be
    written to a <target-file>.rej file. Conflicts can then be resolved
    by hand before :hg:`commit --amend` is run to update the created
    changeset. This flag exists to let people import patches that
    partially apply without losing the associated metadata (author,
    date, description, ...).

    .. note::

       When no hunks apply cleanly, :hg:`import --partial` will create
       an empty changeset, importing only the patch metadata.

    With -s/--similarity, hg will attempt to discover renames and
    copies in the patch in the same way as :hg:`addremove`.

    It is possible to use external patch programs to perform the patch
    by setting the ``ui.patch`` configuration option. For the default
    internal tool, the fuzz can also be configured via ``patch.fuzz``.
    See :hg:`help config` for more information about configuration
    files and how to use these options.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    .. container:: verbose

      Examples:

      - import a traditional patch from a website and detect renames::

          hg import -s 80 http://example.com/bugfix.patch

      - import a changeset from an hgweb server::

          hg import http://www.selenic.com/hg/rev/5ca8c111e9aa

      - import all the patches in an Unix-style mbox::

          hg import incoming-patches.mbox

      - attempt to exactly restore an exported changeset (not always
        possible)::

          hg import --exact proposed-fix.patch

      - use an external tool to apply a patch which is too fuzzy for
        the default internal tool.

          hg import --config ui.patch="patch --merge" fuzzy.patch

      - change the default fuzzing from 2 to a less strict 7

          hg import --config ui.fuzz=7 fuzz.patch

    Returns 0 on success, 1 on partial success (see --partial).
    """

    if not patch1:
        raise error.Abort(_('need at least one patch to import'))

    patches = (patch1,) + patches

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    # Validate option combinations up front, before taking any locks:
    # --bypass excludes --no-commit and --similarity, --exact excludes
    # --edit and --prefix.
    exact = opts.get('exact')
    update = not opts.get('bypass')
    if not update and opts.get('no_commit'):
        raise error.Abort(_('cannot use --no-commit with --bypass'))
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    if sim and not update:
        raise error.Abort(_('cannot use --similarity with --bypass'))
    if exact:
        if opts.get('edit'):
            raise error.Abort(_('cannot use --exact with --edit'))
        if opts.get('prefix'):
            raise error.Abort(_('cannot use --exact with --prefix'))

    base = opts["base"]
    # wlock guards the working directory for the whole run.  When we are
    # committing, lock (store lock) and tr (one transaction covering all
    # patches) are taken as well; with --no-commit, a dirstateguard
    # protects the dirstate instead.  All are released in the finally.
    wlock = dsguard = lock = tr = None
    msgs = []
    ret = 0


    try:
        wlock = repo.wlock()

        if update:
            cmdutil.checkunfinished(repo)
            if (exact or not opts.get('force')):
                cmdutil.bailifchanged(repo)

        if not opts.get('no_commit'):
            lock = repo.lock()
            tr = repo.transaction('import')
        else:
            dsguard = cmdutil.dirstateguard(repo, 'import')
        parents = repo[None].parents()
        for patchurl in patches:
            if patchurl == '-':
                ui.status(_('applying patch from stdin\n'))
                patchfile = ui.fin
                patchurl = 'stdin'      # for error message
            else:
                patchurl = os.path.join(base, patchurl)
                ui.status(_('applying %s\n') % patchurl)
                patchfile = hg.openpath(ui, patchurl)

            haspatch = False
            for hunk in patch.split(patchfile):
                (msg, node, rej) = cmdutil.tryimportone(ui, repo, hunk,
                                                        parents, opts,
                                                        msgs, hg.clean)
                if msg:
                    haspatch = True
                    ui.note(msg + '\n')
                # Re-read the parents for the next hunk: with a working
                # directory update they come from the dirstate, otherwise
                # the just-created node becomes the sole parent.
                if update or exact:
                    parents = repo[None].parents()
                else:
                    parents = [repo[node]]
                if rej:
                    ui.write_err(_("patch applied partially\n"))
                    ui.write_err(_("(fix the .rej files and run "
                                   "`hg commit --amend`)\n"))
                    ret = 1
                    break

            if not haspatch:
                raise error.Abort(_('%s: no diffs found') % patchurl)

        # Only commit the transaction / dirstateguard on full success;
        # otherwise the finally below rolls everything back.
        if tr:
            tr.close()
        if msgs:
            repo.savecommitmessage('\n* * *\n'.join(msgs))
        if dsguard:
            dsguard.close()
        return ret
    finally:
        if tr:
            tr.release()
        release(lock, dsguard, wlock)
4986 4987
@command('incoming|in',
    [('f', 'force', None,
      _('run even if remote repository is unrelated')),
     ('n', 'newest-first', None, _('show newest record first')),
     ('', 'bundle', '',
      _('file to store the bundles into'), _('FILE')),
     ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
     ('B', 'bookmarks', False, _("compare bookmarks")),
     ('b', 'branch', [],
      _('a specific branch you would like to pull'), _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.

    See pull for valid source format details.

    .. container:: verbose

      With -B/--bookmarks, the result of bookmark comparison between
      local and remote repositories is displayed. With -v/--verbose,
      status is also displayed for each bookmark like below::

        BM1               01234567890a added
        BM2               1234567890ab advanced
        BM3               234567890abc diverged
        BM4               34567890abcd changed

      The action taken locally when pulling depends on the
      status of each bookmark:

      :``added``: pull will create it
      :``advanced``: pull will update it
      :``diverged``: pull will create a divergent bookmark
      :``changed``: result depends on remote changesets

      From the point of view of pulling behavior, bookmark
      existing only in the remote repository are treated as ``added``,
      even if it is in fact locally deleted.

    .. container:: verbose

      For remote repository, using --bundle avoids downloading the
      changesets twice if the incoming is followed by a pull.

      Examples:

      - show incoming changes with patches and full description::

          hg incoming -vp

      - show incoming changes excluding merges, store a bundle::

          hg in -vpM --bundle incoming.hg
          hg pull incoming.hg

      - briefly list changes inside a bundle::

          hg in changes.hg -T "{desc|firstline}\\n"

    Returns 0 if there are incoming changes, 1 otherwise.
    """
    # --graph: hand a DAG-rendering callback to the generic incoming
    # driver instead of the usual linear display.
    if opts.get('graph'):
        cmdutil.checkunsupportedgraphflags([], opts)

        def showdag(other, chlist, displayer):
            dag = cmdutil.graphrevs(other, chlist, opts)
            cmdutil.displaygraph(ui, repo, dag, displayer,
                                 graphmod.asciiedges)

        hg._incoming(showdag, lambda: 1, ui, repo, source, opts,
                     buffered=True)
        return 0

    if opts.get('bundle') and opts.get('subrepos'):
        raise error.Abort(_('cannot combine --bundle and --subrepos'))

    # --bookmarks: compare bookmarks with the remote instead of
    # listing incoming changesets.
    if opts.get('bookmarks'):
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        remote = hg.peer(repo, opts, source)
        if 'bookmarks' not in remote.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(source))
        return bookmarks.incoming(ui, repo, remote)

    # Remember the source so subrepo operations can reuse it.
    repo._subtoppath = ui.expandpath(source)
    try:
        return hg.incoming(ui, repo, source, opts)
    finally:
        del repo._subtoppath
5081 5082
5082 5083
@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'),
         norepo=True)
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.

    If no directory is given, the current directory is used.

    It is possible to specify an ``ssh://`` URL as the destination.
    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    # Creating a peer with create=True performs the actual repository
    # initialization, locally or through a remote (e.g. ssh://) path.
    path = ui.expandpath(dest)
    hg.peer(ui, opts, path, create=True)
5099 5100
@command('locate',
    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
    ] + walkopts,
    _('[OPTION]... [PATTERN]...'))
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns (DEPRECATED)

    Print files under Mercurial control in the working directory whose
    names match the given patterns.

    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".

    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.

    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.

    See :hg:`help files` for a more versatile command.

    Returns 0 if a match is found, 1 otherwise.
    """
    # NUL separator for xargs -0 compatibility, newline otherwise.
    sep = '\0' if opts.get('print0') else '\n'
    node = scmutil.revsingle(repo, opts.get('rev'), None).node()

    ctx = repo[node]
    # badfn silences the usual "file not found" warnings; locate simply
    # reports nothing for patterns that match no files.
    matcher = scmutil.match(ctx, pats, opts, default='relglob',
                            badfn=lambda x, y: False)
    fullpath = opts.get('fullpath')

    found = False
    for abspath in ctx.matches(matcher):
        found = True
        if fullpath:
            ui.write(repo.wjoin(abspath), sep)
        else:
            ui.write(((pats and matcher.rel(abspath)) or abspath), sep)

    return 0 if found else 1
5147 5148
@command('^log|history',
    [('f', 'follow', None,
     _('follow changeset history, or file history across copies and renames')),
    ('', 'follow-first', None,
     _('only follow the first parent of merge changesets (DEPRECATED)')),
    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
    ('C', 'copies', None, _('show copied files')),
    ('k', 'keyword', [],
     _('do case-insensitive search for a given text'), _('TEXT')),
    ('r', 'rev', [], _('show the specified revision or revset'), _('REV')),
    ('', 'removed', None, _('include revisions where files were removed')),
    ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
    ('u', 'user', [], _('revisions committed by user'), _('USER')),
    ('', 'only-branch', [],
     _('show only changesets within the given named branch (DEPRECATED)'),
     _('BRANCH')),
    ('b', 'branch', [],
     _('show changesets within the given named branch'), _('BRANCH')),
    ('P', 'prune', [],
     _('do not display revision or any of its ancestors'), _('REV')),
    ] + logopts + walkopts,
    _('[OPTION]... [FILE]'),
    inferrepo=True)
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire
    project.

    If no revision range is specified, the default is ``tip:0`` unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.

    File history is shown without following rename or copy history of
    files. Use -f/--follow with a filename to follow history across
    renames and copies. --follow without a filename will only show
    ancestors or descendants of the starting revision.

    By default this command prints revision number and changeset id,
    tags, non-trivial parents, user, date and time, and a summary for
    each commit. When the -v/--verbose switch is used, the list of
    changed files and full commit message are shown.

    With --graph the revisions are shown as an ASCII art DAG with the most
    recent changeset at the top.
    'o' is a changeset, '@' is a working directory parent, 'x' is obsolete,
    and '+' represents a fork where the changeset from the lines below is a
    parent of the 'o' merge on the same line.

    .. note::

       :hg:`log --patch` may generate unexpected diff output for merge
       changesets, as it will only compare the merge changeset against
       its first parent. Also, only files different from BOTH parents
       will appear in files:.

    .. note::

       For performance reasons, :hg:`log FILE` may omit duplicate changes
       made on branches and will not show removals or mode changes. To
       see all such changes, use the --removed switch.

    .. container:: verbose

      Some examples:

      - changesets with full descriptions and file lists::

          hg log -v

      - changesets ancestral to the working directory::

          hg log -f

      - last 10 commits on the current branch::

          hg log -l 10 -b .

      - changesets showing all modifications of a file, including removals::

          hg log --removed file.c

      - all changesets that touch a directory, with diffs, excluding merges::

          hg log -Mp lib/

      - all revision numbers that match a keyword::

          hg log -k bug --template "{rev}\\n"

      - the full hash identifier of the working directory parent::

          hg log -r . --template "{node}\\n"

      - list available log templates::

          hg log -T list

      - check if a given changeset is included in a tagged release::

          hg log -r "a21ccf and ancestor(1.9)"

      - find all changesets by some user in a date range::

          hg log -k alice -d "may 2008 to jul 2008"

      - summary of all changesets after the last tag::

          hg log -r "last(tagged())::" --template "{desc|firstline}\\n"

    See :hg:`help dates` for a list of formats valid for -d/--date.

    See :hg:`help revisions` and :hg:`help revsets` for more about
    specifying and ordering revisions.

    See :hg:`help templates` for more about pre-packaged styles and
    specifying custom templates.

    Returns 0 on success.

    """
    # --follow combined with --rev is rewritten into an equivalent
    # 'reverse(::REV)' revset, after which --follow itself is dropped.
    if opts.get('follow') and opts.get('rev'):
        opts['rev'] = [revset.formatspec('reverse(::%lr)', opts.get('rev'))]
        del opts['follow']

    # --graph output is handled entirely by the graphlog machinery.
    if opts.get('graph'):
        return cmdutil.graphlog(ui, repo, *pats, **opts)

    revs, expr, filematcher = cmdutil.getlogrevs(repo, pats, opts)
    limit = cmdutil.loglimit(opts)
    count = 0

    # With -C/--copies, build a rename-lookup function bounded by the
    # highest requested revision (+1 so that revision is included).
    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
    for rev in revs:
        if count == limit:
            break
        ctx = repo[rev]
        copies = None
        # rev 0 has no ancestors, so there is nothing a file could have
        # been renamed from; skip the lookup there.
        if getrenamed is not None and rev:
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, rev)
                if rename:
                    copies.append((fn, rename[0]))
        if filematcher:
            revmatchfn = filematcher(ctx.rev())
        else:
            revmatchfn = None
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        # flush() reports whether the buffered revision was actually
        # emitted; only those count toward -l/--limit.
        if displayer.flush(ctx):
            count += 1

    displayer.close()
5308 5309
@command('manifest',
    [('r', 'rev', '', _('revision to display'), _('REV')),
     ('', 'all', False, _("list files from all revisions"))]
    + formatteropts,
    _('[-r REV]'))
def manifest(ui, repo, node=None, rev=None, **opts):
    """output the current or given revision of the project manifest

    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if no revision is checked out.

    With -v, print file permissions, symlink and executable bits.
    With --debug, print file revision hashes.

    If option --all is specified, the list of all files from all revisions
    is printed. This includes deleted and renamed files.

    Returns 0 on success.
    """

    fm = ui.formatter('manifest', opts)

    if opts.get('all'):
        if rev or node:
            raise error.Abort(_("can't specify a revision with --all"))

        # Walk the store directly: every non-empty data/<path>.i revlog
        # corresponds to a file that existed at some point in history.
        prefix, suffix = "data/", ".i"
        names = []
        with repo.lock():
            for fn, b, size in repo.store.datafiles():
                if (size != 0 and fn.startswith(prefix)
                        and fn.endswith(suffix)):
                    names.append(fn[len(prefix):-len(suffix)])
        for name in names:
            fm.startitem()
            fm.write("path", '%s\n', name)
        fm.end()
        return

    if rev and node:
        raise error.Abort(_("please specify just one revision"))

    if not node:
        node = rev

    # Display glyphs and permission strings keyed by manifest flag:
    # 'l' = symlink, 'x' = executable, '' = regular file.
    flagchar = {'l': '@', 'x': '*', '': ''}
    flagmode = {'l': '644', 'x': '755', '': '644'}
    ctx = scmutil.revsingle(repo, node)
    mf = ctx.manifest()
    for f in ctx:
        fm.startitem()
        flags = ctx[f].flags()
        fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f]))
        fm.condwrite(ui.verbose, 'mode type', '%s %1s ',
                     flagmode[flags], flagchar[flags])
        fm.write('path', '%s\n', f)
    fm.end()
5368 5369
@command('^merge',
    [('f', 'force', None,
      _('force a merge including outstanding changes (DEPRECATED)')),
    ('r', 'rev', '', _('revision to merge'), _('REV')),
    ('P', 'preview', None,
     _('review revisions to merge (no merge is performed)'))
    ] + mergetoolopts,
    _('[-P] [[-r] REV]'))
def merge(ui, repo, node=None, **opts):
    """merge another revision into working directory

    The current working directory is updated with all changes made in
    the requested revision since the last common predecessor revision.

    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates to the repository are allowed. The next commit will have
    two parents.

    ``--tool`` can be used to specify the merge tool used for file
    merges. It overrides the HGMERGE environment variable and your
    configuration files. See :hg:`help merge-tools` for options.

    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision with which to merge with must be provided.

    See :hg:`help resolve` for information on handling file conflicts.

    To undo an uncommitted merge, use :hg:`update --clean .` which
    will check out a clean copy of the original merge parent, losing
    all changes.

    Returns 0 on success, 1 if there are unresolved files.
    """

    # The revision may be given positionally or via -r, but not both.
    if opts.get('rev') and node:
        raise error.Abort(_("please specify just one revision"))
    if not node:
        node = opts.get('rev')

    if node:
        node = scmutil.revsingle(repo, node).node()

    if not node:
        # No revision given: let destutil pick the default merge target.
        node = repo[destutil.destmerge(repo)].node()

    if opts.get('preview'):
        # Show what would be merged in: ancestors of the target that are
        # not ancestors of the working directory parent.
        wdirp1 = repo.lookup('.')
        target = repo.lookup(node)
        missing = repo.changelog.findmissing(common=[wdirp1],
                                             heads=[target])

        displayer = cmdutil.show_changeset(ui, repo, opts)
        for n in missing:
            displayer.show(repo[n])
        displayer.close()
        return 0

    force = opts.get('force')
    try:
        # ui.forcemerge is an internal variable, do not document
        repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'merge')
        return hg.merge(repo, node, force=force, mergeforce=force)
    finally:
        ui.setconfig('ui', 'forcemerge', '', 'merge')
5436 5437
@command('outgoing|out',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'), _('REV')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('B', 'bookmarks', False, _('compare bookmarks')),
    ('b', 'branch', [], _('a specific branch you would like to push'),
     _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in the destination

    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.

    See pull for details of valid destination formats.

    .. container:: verbose

      With -B/--bookmarks, the result of bookmark comparison between
      local and remote repositories is displayed. With -v/--verbose,
      status is also displayed for each bookmark like below::

        BM1               01234567890a added
        BM2                            deleted
        BM3               234567890abc advanced
        BM4               34567890abcd diverged
        BM5               4567890abcde changed

      The action taken when pushing depends on the
      status of each bookmark:

      :``added``: push with ``-B`` will create it
      :``deleted``: push with ``-B`` will delete it
      :``advanced``: push will update it
      :``diverged``: push with ``-B`` will update it
      :``changed``: push with ``-B`` will update it

      From the point of view of pushing behavior, bookmarks
      existing only in the remote repository are treated as
      ``deleted``, even if it is in fact added remotely.

    Returns 0 if there are outgoing changes, 1 otherwise.
    """
    # --graph: compute the outgoing set ourselves and render it as a DAG.
    if opts.get('graph'):
        cmdutil.checkunsupportedgraphflags([], opts)
        outgoingrevs, remote = hg._outgoing(ui, repo, dest, opts)
        if not outgoingrevs:
            cmdutil.outgoinghooks(ui, repo, remote, opts, outgoingrevs)
            return

        dag = cmdutil.graphrevs(repo, outgoingrevs, opts)
        displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
        cmdutil.displaygraph(ui, repo, dag, displayer, graphmod.asciiedges)
        cmdutil.outgoinghooks(ui, repo, remote, opts, outgoingrevs)
        return 0

    # --bookmarks: compare bookmarks with the remote instead of
    # listing outgoing changesets.
    if opts.get('bookmarks'):
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        remote = hg.peer(repo, opts, dest)
        if 'bookmarks' not in remote.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(dest))
        return bookmarks.outgoing(ui, repo, remote)

    # Remember the destination so subrepo operations can reuse it.
    repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default')
    try:
        return hg.outgoing(ui, repo, dest, opts)
    finally:
        del repo._subtoppath
5511 5512
@command('parents',
    [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
    ] + templateopts,
    _('[-r REV] [FILE]'),
    inferrepo=True)
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision (DEPRECATED)

    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, the revision in which the file was
    last changed (before the working directory revision or the
    argument to --rev if given) is printed.

    This command is equivalent to::

        hg log -r "p1()+p2()" or
        hg log -r "p1(REV)+p2(REV)" or
        hg log -r "max(::p1() and file(FILE))+max(::p2() and file(FILE))" or
        hg log -r "max(::p1(REV) and file(FILE))+max(::p2(REV) and file(FILE))"

    See :hg:`summary` and :hg:`help revsets` for related information.

    Returns 0 on success.
    """

    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    if file_:
        m = scmutil.match(ctx, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise error.Abort(_('can only specify an explicit filename'))
        file_ = m.files()[0]
        # Collect the file revision from each parent that has the file.
        filenodes = []
        for pctx in ctx.parents():
            if not pctx:
                continue
            try:
                filenodes.append(pctx.filenode(file_))
            except error.LookupError:
                pass
        if not filenodes:
            raise error.Abort(_("'%s' not found in manifest!") % file_)
        # Map each file revision back to the changeset that introduced it.
        p = [repo.filectx(file_, fileid=fn).node() for fn in filenodes]
    else:
        p = [pctx.node() for pctx in ctx.parents()]

    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in p:
        if n != nullid:
            displayer.show(repo[n])
    displayer.close()
5567 5568
@command('paths', formatteropts, _('[NAME]'), optionalrepo=True)
def paths(ui, repo, search=None, **opts):
    """show aliases for remote repositories

    Show definition of symbolic path name NAME. If no name is given,
    show definition of all available names.

    Option -q/--quiet suppresses all output when searching for NAME
    and shows only the path names when listing all definitions.

    Path names are defined in the [paths] section of your
    configuration file and in ``/etc/mercurial/hgrc``. If run inside a
    repository, ``.hg/hgrc`` is used, too.

    The path names ``default`` and ``default-push`` have a special
    meaning. When performing a push or pull operation, they are used
    as fallbacks if no location is specified on the command-line.
    When ``default-push`` is set, it will be used for push and
    ``default`` will be used for pull; otherwise ``default`` is used
    as the fallback for both. When cloning a repository, the clone
    source is written as ``default`` in ``.hg/hgrc``.

    .. note::

       ``default`` and ``default-push`` apply to all inbound (e.g.
       :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email`
       and :hg:`bundle`) operations.

    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    # Either the single matching entry, or all entries sorted by name.
    allpaths = ui.paths.iteritems()
    if search:
        pathitems = [item for item in allpaths if item[0] == search]
    else:
        pathitems = sorted(allpaths)

    fm = ui.formatter('paths', opts)
    # With a non-plain formatter, emit raw locations; otherwise hide
    # any password embedded in the URL.
    hidepassword = str if fm else util.hidepassword
    namefmt = '%s\n' if ui.quiet else '%s = '
    showsubopts = not search and not ui.quiet

    for name, path in pathitems:
        fm.startitem()
        fm.condwrite(not search, 'name', namefmt, name)
        fm.condwrite(not ui.quiet, 'url', '%s\n', hidepassword(path.rawloc))
        for subopt, value in sorted(path.suboptions.items()):
            assert subopt not in ('name', 'url')
            if showsubopts:
                fm.plain('%s:%s = ' % (name, subopt))
            fm.condwrite(showsubopts, subopt, '%s\n', value)

    fm.end()

    if search and not pathitems:
        if not ui.quiet:
            ui.warn(_("not found!\n"))
        return 1
    return 0
5635 5636
@command('phase',
    [('p', 'public', False, _('set changeset phase to public')),
     ('d', 'draft', False, _('set changeset phase to draft')),
     ('s', 'secret', False, _('set changeset phase to secret')),
     ('f', 'force', False, _('allow to move boundary backward')),
     ('r', 'rev', [], _('target revision'), _('REV')),
    ],
    _('[-p|-d|-s] [-f] [-r] [REV...]'))
def phase(ui, repo, *revs, **opts):
    """set or show the current phase name

    With no argument, show the phase name of the current revision(s).

    With one of -p/--public, -d/--draft or -s/--secret, change the
    phase value of the specified revisions.

    Unless -f/--force is specified, :hg:`phase` won't move changeset from a
    lower phase to an higher phase. Phases are ordered as follows::

        public < draft < secret

    Returns 0 on success, 1 if some phases could not be changed.

    (For more information about the phases concept, see :hg:`help phases`.)
    """
    # search for a unique phase argument; the flag's index in
    # phases.phasenames doubles as the numeric phase value
    targetphase = None
    for idx, name in enumerate(phases.phasenames):
        if opts[name]:
            if targetphase is not None:
                raise error.Abort(_('only one phase can be specified'))
            targetphase = idx

    # look for specified revision (positional args and -r combine)
    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        # display both parents as the second parent phase can influence
        # the phase of a merge commit
        revs = [c.rev() for c in repo[None].parents()]

    revs = scmutil.revrange(repo, revs)

    lock = None
    ret = 0
    if targetphase is None:
        # display mode: no phase flag given, just print "<rev>: <phase>"
        for r in revs:
            ctx = repo[r]
            ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr()))
    else:
        # set mode: needs the store lock and a transaction
        tr = None
        lock = repo.lock()
        try:
            tr = repo.transaction("phase")
            # set phase
            if not revs:
                raise error.Abort(_('empty revision set'))
            nodes = [repo[r].node() for r in revs]
            # moving revision from public to draft may hide them
            # We have to check result on an unfiltered repository
            unfi = repo.unfiltered()
            getphase = unfi._phasecache.phase
            # snapshot phases of every revision so we can report what moved
            olddata = [getphase(unfi, r) for r in unfi]
            phases.advanceboundary(repo, tr, targetphase, nodes)
            if opts['force']:
                # --force additionally allows moving the boundary backward
                # (e.g. public back to draft), which advanceboundary alone
                # never does
                phases.retractboundary(repo, tr, targetphase, nodes)
            tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()
        # re-read phases after the transaction to see what actually changed
        getphase = unfi._phasecache.phase
        newdata = [getphase(unfi, r) for r in unfi]
        changes = sum(newdata[r] != olddata[r] for r in unfi)
        cl = unfi.changelog
        # nodes still below the requested phase were refused (no --force)
        rejected = [n for n in nodes
                    if newdata[cl.rev(n)] < targetphase]
        if rejected:
            ui.warn(_('cannot move %i changesets to a higher '
                      'phase, use --force\n') % len(rejected))
            ret = 1
        if changes:
            msg = _('phase changed for %i changesets\n') % changes
            if ret:
                # partial success: make the count visible despite the warning
                ui.status(msg)
            else:
                ui.note(msg)
        else:
            ui.warn(_('no phases changed\n'))
    return ret
5727 5728
def postincoming(ui, repo, modheads, optupdate, checkout, brev):
    """Give feedback, or update the working copy, after pull/unbundle.

    This takes arguments below:

    :modheads: change of heads by pull/unbundle
    :optupdate: updating working directory is needed or not
    :checkout: update destination revision (or None to default destination)
    :brev: a name, which might be a bookmark to be activated after updating

    Returns the result of hg.updatetotally() when an update was requested,
    otherwise None.
    """
    if modheads == 0:
        # nothing came in; stay silent
        return
    if optupdate:
        try:
            return hg.updatetotally(ui, repo, checkout, brev)
        except error.UpdateAbort as inst:
            # re-raise with a "not updating:" prefix but the same hint
            raise error.UpdateAbort(_("not updating: %s") % str(inst),
                                    hint=inst.hint)
    if not modheads > 1:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
        return
    # several heads changed: suggest the appropriate follow-up command
    nbbranchheads = len(repo.branchheads())
    if nbbranchheads == modheads:
        ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
    elif nbbranchheads > 1:
        ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to "
                    "merge)\n"))
    else:
        ui.status(_("(run 'hg heads' to see heads)\n"))
5758 5759
@command('^pull',
    [('u', 'update', None,
     _('update to new branch head if changesets were pulled')),
    ('f', 'force', None, _('run even when remote repository is unrelated')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
    ('b', 'branch', [], _('a specific branch you would like to pull'),
     _('BRANCH')),
    ] + remoteopts,
    _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to a local repository (the current one unless
    -R is specified). By default, this does not update the copy of the
    project in the working directory.

    Use :hg:`incoming` if you want to see what would have been added
    by a pull at the time you issued this command. If you then decide
    to add those changes to the repository, you should use :hg:`pull
    -r X` where ``X`` is the last changeset listed by :hg:`incoming`.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    Specifying bookmark as ``.`` is equivalent to specifying the active
    bookmark's name.

    Returns 0 on success, 1 if an update had unresolved files.
    """
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    ui.status(_('pulling from %s\n') % util.hidepassword(source))
    other = hg.peer(repo, opts, source)
    try:
        revs, checkout = hg.addbranchrevs(repo, other, branches,
                                          opts.get('rev'))

        # extra keyword arguments forwarded to exchange.pull()
        pullopargs = {}
        if opts.get('bookmark'):
            if not revs:
                revs = []
            # The list of bookmark used here is not the one used to actually
            # update the bookmark name. This can result in the revision pulled
            # not ending up with the name of the bookmark because of a race
            # condition on the server. (See issue 4689 for details)
            remotebookmarks = other.listkeys('bookmarks')
            pullopargs['remotebookmarks'] = remotebookmarks
            for b in opts['bookmark']:
                # '.' expands to the active bookmark's name
                b = repo._bookmarks.expandname(b)
                if b not in remotebookmarks:
                    raise error.Abort(_('remote bookmark %s not found!') % b)
                revs.append(remotebookmarks[b])

        if revs:
            try:
                # When 'rev' is a bookmark name, we cannot guarantee that it
                # will be updated with that name because of a race condition
                # server side. (See issue 4689 for details)
                oldrevs = revs
                revs = [] # actually, nodes
                for r in oldrevs:
                    node = other.lookup(r)
                    revs.append(node)
                    if r == checkout:
                        # keep checkout in sync with the resolved node
                        checkout = node
            except error.CapabilityError:
                err = _("other repository doesn't support revision lookup, "
                        "so a rev cannot be specified.")
                raise error.Abort(err)

        pullopargs.update(opts.get('opargs', {}))
        modheads = exchange.pull(repo, other, heads=revs,
                                 force=opts.get('force'),
                                 bookmarks=opts.get('bookmark', ()),
                                 opargs=pullopargs).cgresult

        # brev is a name, which might be a bookmark to be activated at
        # the end of the update. In other words, it is an explicit
        # destination of the update
        brev = None

        if checkout:
            checkout = str(repo.changelog.rev(checkout))

            # order below depends on implementation of
            # hg.addbranchrevs(). opts['bookmark'] is ignored,
            # because 'checkout' is determined without it.
            if opts.get('rev'):
                brev = opts['rev'][0]
            elif opts.get('branch'):
                brev = opts['branch'][0]
            else:
                brev = branches[0]
        # let subrepos know where their top-level repo pulled from
        repo._subtoppath = source
        try:
            ret = postincoming(ui, repo, modheads, opts.get('update'),
                               checkout, brev)

        finally:
            del repo._subtoppath

    finally:
        # always close the peer connection, even on abort
        other.close()
    return ret
5867 5868
@command('^push',
    [('f', 'force', None, _('force push')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'),
     _('REV')),
    ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
    ('b', 'branch', [],
     _('a specific branch you would like to push'), _('BRANCH')),
    ('', 'new-branch', False, _('allow pushing a new branch')),
    ] + remoteopts,
    _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination

    Push changesets from the local repository to the specified
    destination.

    This operation is symmetrical to pull: it is identical to a pull
    in the destination repository from the current one.

    By default, push will not allow creation of new heads at the
    destination, since multiple heads would make it unclear which head
    to use. In this situation, it is recommended to pull and merge
    before pushing.

    Use --new-branch if you want to allow push to create a new named
    branch that is not present at the destination. This allows you to
    only create a new branch without forcing other changes.

    .. note::

       Extra care should be taken with the -f/--force option,
       which will push all new heads on all branches, an action which will
       almost always cause confusion for collaborators.

    If -r/--rev is used, the specified revision and all its ancestors
    will be pushed to the remote repository.

    If -B/--bookmark is used, the specified bookmarked revision, its
    ancestors, and the bookmark will be pushed to the remote
    repository. Specifying ``.`` is equivalent to specifying the active
    bookmark's name.

    Please see :hg:`help urls` for important details about ``ssh://``
    URLs. If DESTINATION is omitted, a default path will be used.

    Returns 0 if push was successful, 1 if nothing to push.
    """

    if opts.get('bookmark'):
        # record the bookmarks being pushed so later stages can see them
        ui.setconfig('bookmarks', 'pushing', opts['bookmark'], 'push')
        for b in opts['bookmark']:
            # translate -B options to -r so changesets get pushed
            b = repo._bookmarks.expandname(b)
            if b in repo._bookmarks:
                opts.setdefault('rev', []).append(b)
            else:
                # if we try to push a deleted bookmark, translate it to null
                # this lets simultaneous -r, -b options continue working
                opts.setdefault('rev', []).append("null")

    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_('see the "path" section in "hg help config"'))
    # a path may declare a distinct push location; prefer it
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get('branch') or [])
    ui.status(_('pushing to %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)]
        if not revs:
            raise error.Abort(_("specified revisions evaluate to an empty set"),
                              hint=_("use different revision arguments"))
    elif path.pushrev:
        # It doesn't make any sense to specify ancestor revisions. So limit
        # to DAG heads to make discovery simpler.
        expr = revset.formatspec('heads(%r)', path.pushrev)
        revs = scmutil.revrange(repo, [expr])
        revs = [repo[rev].node() for rev in revs]
        if not revs:
            raise error.Abort(_('default push revset for path evaluates to an '
                                'empty set'))

    repo._subtoppath = dest
    try:
        # push subrepos depth-first for coherent ordering
        c = repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            result = c.sub(s).push(opts)
            if result == 0:
                # a subrepo push reporting 0 aborts the outer push; 'not 0'
                # yields True, i.e. exit status 1
                return not result
    finally:
        del repo._subtoppath
    pushop = exchange.push(repo, other, opts.get('force'), revs=revs,
                           newbranch=opts.get('new_branch'),
                           bookmarks=opts.get('bookmark', ()),
                           opargs=opts.get('opargs'))

    # cgresult is truthy on a successful changegroup push, so invert it
    # for the shell exit code (0 = success, 1 = nothing to push)
    result = not pushop.cgresult

    # bkresult reflects the bookmark push; 2 appears to mark a bookmark
    # push failure and overrides the changegroup result -- NOTE(review):
    # exact bkresult semantics are defined in the exchange module
    if pushop.bkresult is not None:
        if pushop.bkresult == 2:
            result = 2
        elif not result and pushop.bkresult:
            result = 2

    return result
5979 5980
@command('recover', [])
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.

    Returns 0 if successful, 1 if nothing to recover or verify fails.
    """
    # repo.recover() reports whether an interrupted transaction was found;
    # when one was rolled back, double-check the repository with verify
    recovered = repo.recover()
    if not recovered:
        return 1
    return hg.verify(repo)
5995 5996
@command('^remove|rm',
    [('A', 'after', None, _('record delete for missing files')),
    ('f', 'force', None,
     _('forget added files, delete modified files')),
    ] + subrepoopts + walkopts,
    _('[OPTION]... FILE...'),
    inferrepo=True)
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the current branch.

    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see :hg:`revert`. To undo added
    files, see :hg:`forget`.

    .. container:: verbose

      -A/--after can be used to remove only files that have already
      been deleted, -f/--force can be used to force deletion, and -Af
      can be used to remove files from the next revision without
      deleting them from the working directory.

      The following table details the behavior of remove for different
      file states (columns) and option combinations (rows). The file
      states are Added [A], Clean [C], Modified [M] and Missing [!]
      (as reported by :hg:`status`). The actions are Warn, Remove
      (from branch) and Delete (from disk):

      ========= == == == ==
      opt/state A  C  M  !
      ========= == == == ==
      none      W  RD W  R
      -f        R  RD RD R
      -A        W  W  W  R
      -Af       R  R  R  R
      ========= == == == ==

      .. note::

         :hg:`remove` never deletes files in Added [A] state from the
         working directory, not even if ``--force`` is specified.

    Returns 0 on success, 1 if any warnings encountered.
    """

    after = opts.get('after')
    force = opts.get('force')
    # without --after, at least one pattern is mandatory
    if not (pats or after):
        raise error.Abort(_('no files specified'))

    # all of the actual work is delegated to cmdutil.remove()
    matcher = scmutil.match(repo[None], pats, opts)
    return cmdutil.remove(ui, repo, matcher, "", after, force,
                          opts.get('subrepos'))
6049 6050
@command('rename|move|mv',
    [('A', 'after', None, _('record a rename that has already occurred')),
    ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... SOURCE... DEST'))
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect at the next commit. To undo a rename
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    # a rename is a copy that also schedules the source for removal;
    # hold the working-directory lock for the duration
    wlock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        wlock.release()
6073 6074
@command('resolve',
    [('a', 'all', None, _('select all unresolved files')),
    ('l', 'list', None, _('list state of files needing merge')),
    ('m', 'mark', None, _('mark files as resolved')),
    ('u', 'unmark', None, _('mark files as unresolved')),
    ('n', 'no-status', None, _('hide status prefix'))]
    + mergetoolopts + walkopts + formatteropts,
    _('[OPTION]... [FILE]...'),
    inferrepo=True)
def resolve(ui, repo, *pats, **opts):
    """redo merges or set/view the merge status of files

    Merges with unresolved conflicts are often the result of
    non-interactive merging using the ``internal:merge`` configuration
    setting, or a command-line merge tool like ``diff3``. The resolve
    command is used to manage the files involved in a merge, after
    :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
    working directory must have two parents). See :hg:`help
    merge-tools` for information on configuring merge tools.

    The resolve command can be used in the following ways:

    - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
      files, discarding any previous merge attempts. Re-merging is not
      performed for files already marked as resolved. Use ``--all/-a``
      to select all unresolved files. ``--tool`` can be used to specify
      the merge tool used for the given files. It overrides the HGMERGE
      environment variable and your configuration files. Previous file
      contents are saved with a ``.orig`` suffix.

    - :hg:`resolve -m [FILE]`: mark a file as having been resolved
      (e.g. after having manually fixed-up the files). The default is
      to mark all unresolved files.

    - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
      default is to mark all resolved files.

    - :hg:`resolve -l`: list files which had or still have conflicts.
      In the printed list, ``U`` = unresolved and ``R`` = resolved.

    .. note::

       Mercurial will not let you commit files with unresolved merge
       conflicts. You must use :hg:`resolve -m ...` before you can
       commit after a conflicting merge.

    Returns 0 on success, 1 if any files fail a resolve attempt.
    """

    flaglist = 'all mark unmark list no_status'.split()
    all, mark, unmark, show, nostatus = \
        [opts.get(o) for o in flaglist]

    # sanity-check mutually exclusive option combinations
    if (show and (mark or unmark)) or (mark and unmark):
        raise error.Abort(_("too many options specified"))
    if pats and all:
        raise error.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        # wrap the hint in _() for translation, consistent with every
        # other Abort hint in this module
        raise error.Abort(_('no files or directories specified'),
                          hint=_('use --all to re-merge all unresolved files'))

    if show:
        # -l/--list: read-only display of the merge state, no wlock needed
        fm = ui.formatter('resolve', opts)
        ms = mergemod.mergestate.read(repo)
        m = scmutil.match(repo[None], pats, opts)
        for f in ms:
            if not m(f):
                continue
            # label carries the per-state color/markup class
            l = 'resolve.' + {'u': 'unresolved', 'r': 'resolved',
                              'd': 'driverresolved'}[ms[f]]
            fm.startitem()
            fm.condwrite(not nostatus, 'status', '%s ', ms[f].upper(), label=l)
            fm.write('path', '%s\n', f, label=l)
        fm.end()
        return 0

    with repo.wlock():
        ms = mergemod.mergestate.read(repo)

        if not (ms.active() or repo.dirstate.p2() != nullid):
            raise error.Abort(
                _('resolve command not applicable when not merging'))

        wctx = repo[None]

        if ms.mergedriver and ms.mdstate() == 'u':
            # the merge driver's preprocess step has not run yet; run it
            proceed = mergemod.driverpreprocess(repo, ms, wctx)
            ms.commit()
            # allow mark and unmark to go through
            if not mark and not unmark and not proceed:
                return 1

        m = scmutil.match(wctx, pats, opts)
        ret = 0
        didwork = False
        runconclude = False

        # files whose preresolve step did not finish the merge
        tocomplete = []
        for f in ms:
            if not m(f):
                continue

            didwork = True

            # don't let driver-resolved files be marked, and run the conclude
            # step if asked to resolve
            if ms[f] == "d":
                exact = m.exact(f)
                if mark:
                    if exact:
                        ui.warn(_('not marking %s as it is driver-resolved\n')
                                % f)
                elif unmark:
                    if exact:
                        ui.warn(_('not unmarking %s as it is driver-resolved\n')
                                % f)
                else:
                    runconclude = True
                continue

            if mark:
                ms.mark(f, "r")
            elif unmark:
                ms.mark(f, "u")
            else:
                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                try:
                    util.copyfile(a, a + ".resolve")
                except (IOError, OSError) as inst:
                    # a missing working-copy file is fine (e.g. deleted side)
                    if inst.errno != errno.ENOENT:
                        raise

                try:
                    # preresolve file
                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                 'resolve')
                    complete, r = ms.preresolve(f, wctx)
                    if not complete:
                        tocomplete.append(f)
                    elif r:
                        ret = 1
                finally:
                    # always restore the forcemerge config and persist state
                    ui.setconfig('ui', 'forcemerge', '', 'resolve')
                    ms.commit()

                # replace filemerge's .orig file with our resolve file, but only
                # for merges that are complete
                if complete:
                    try:
                        util.rename(a + ".resolve",
                                    scmutil.origpath(ui, repo, a))
                    except OSError as inst:
                        if inst.errno != errno.ENOENT:
                            raise

        for f in tocomplete:
            try:
                # resolve file
                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                             'resolve')
                r = ms.resolve(f, wctx)
                if r:
                    ret = 1
            finally:
                ui.setconfig('ui', 'forcemerge', '', 'resolve')
                ms.commit()

            # replace filemerge's .orig file with our resolve file
            a = repo.wjoin(f)
            try:
                util.rename(a + ".resolve", scmutil.origpath(ui, repo, a))
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        ms.commit()
        ms.recordactions()

        if not didwork and pats:
            # patterns matched nothing; try to suggest a working invocation
            hint = None
            if not any([p for p in pats if p.find(':') >= 0]):
                pats = ['path:%s' % p for p in pats]
                m = scmutil.match(wctx, pats, opts)
                for f in ms:
                    if not m(f):
                        continue
                    flags = ''.join(['-%s ' % o[0] for o in flaglist
                                                   if opts.get(o)])
                    hint = _("(try: hg resolve %s%s)\n") % (
                             flags,
                             ' '.join(pats))
                    break
            ui.warn(_("arguments do not match paths that need resolving\n"))
            if hint:
                ui.warn(hint)
        elif ms.mergedriver and ms.mdstate() != 's':
            # run conclude step when either a driver-resolved file is requested
            # or there are no driver-resolved files
            # we can't use 'ret' to determine whether any files are unresolved
            # because we might not have tried to resolve some
            if ((runconclude or not list(ms.driverresolved()))
                and not list(ms.unresolved())):
                proceed = mergemod.driverconclude(repo, ms, wctx)
                ms.commit()
                if not proceed:
                    return 1

    # Nudge users into finishing an unfinished operation
    unresolvedf = list(ms.unresolved())
    driverresolvedf = list(ms.driverresolved())
    if not unresolvedf and not driverresolvedf:
        ui.status(_('(no more unresolved files)\n'))
        cmdutil.checkafterresolved(repo)
    elif not unresolvedf:
        ui.status(_('(no more unresolved files -- '
                    'run "hg resolve --all" to conclude)\n'))

    return ret
6293 6294
@command('revert',
    [('a', 'all', None, _('revert all changes when no arguments given')),
    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
    ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
    ('C', 'no-backup', None, _('do not save backup copies of files')),
    ('i', 'interactive', None,
            _('interactively select the changes (EXPERIMENTAL)')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [-r REV] [NAME]...'))
def revert(ui, repo, *pats, **opts):
    """restore files to their checkout state

    .. note::

       To check out earlier revisions, you should use :hg:`update REV`.
       To cancel an uncommitted merge (and lose your changes),
       use :hg:`update --clean .`.

    With no revision specified, revert the specified files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify a
    revision.

    Using the -r/--rev or -d/--date options, revert the given files or
    directories to their states as of a specific revision. Because
    revert does not change the working directory parents, this will
    cause these files to appear modified. This can be helpful to "back
    out" some or all of an earlier change. See :hg:`backout` for a
    related method.

    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup. It is possible to store
    the backup files in a custom directory relative to the root of the
    repository by setting the ``ui.origbackuppath`` configuration
    option.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    See :hg:`help backout` for a way to reverse the effect of an
    earlier changeset.

    Returns 0 on success.
    """

    if opts.get("date"):
        # --date and --rev are mutually exclusive; --date is resolved to
        # the tipmost matching revision and stored as if --rev were given
        if opts.get("rev"):
            raise error.Abort(_("you can't specify a revision and a date"))
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])

    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        # revert after merge is a trap for new users (issue2915)
        raise error.Abort(_('uncommitted merge with no revision specified'),
                          hint=_("use 'hg update' or see 'hg help revert'"))

    ctx = scmutil.revsingle(repo, opts.get('rev'))

    if (not (pats or opts.get('include') or opts.get('exclude') or
             opts.get('all') or opts.get('interactive'))):
        # nothing selected: refuse, and craft the most helpful hint for
        # the user's current situation (merge / dirty / other revision)
        msg = _("no files or directories specified")
        if p2 != nullid:
            hint = _("uncommitted merge, use --all to discard all changes,"
                     " or 'hg update -C .' to abort the merge")
            raise error.Abort(msg, hint=hint)
        dirty = any(repo.status())
        node = ctx.node()
        if node != parent:
            if dirty:
                hint = _("uncommitted changes, use --all to discard all"
                         " changes, or 'hg update %s' to update") % ctx.rev()
            else:
                hint = _("use --all to revert all files,"
                         " or 'hg update %s' to update") % ctx.rev()
        elif dirty:
            hint = _("uncommitted changes, use --all to discard all changes")
        else:
            hint = _("use --all to revert all files")
        raise error.Abort(msg, hint=hint)

    # the real work happens in cmdutil.revert()
    return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats, **opts)
6376 6377
@command('rollback', dryrunopts +
         [('f', 'force', False, _('ignore safety measures'))])
def rollback(ui, repo, **opts):
    """roll back the last transaction (DANGEROUS) (DEPRECATED)

    Please use :hg:`commit --amend` instead of rollback to correct
    mistakes in the last commit.

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time. This command does not alter
    the working directory.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository.

    .. container:: verbose

      For example, the following commands are transactional, and their
      effects can be rolled back:

      - commit
      - import
      - pull
      - push (with this repository as the destination)
      - unbundle

      To avoid permanent data loss, rollback will refuse to rollback a
      commit transaction if it isn't checked out. Use --force to
      override this protection.

      The rollback command can be entirely disabled by setting the
      ``ui.rollback`` configuration setting to false. If you're here
      because you want to use rollback and it's disabled, you can
      re-enable the command by setting ``ui.rollback`` to true.

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.

    Returns 0 on success, 1 if no rollback data is available.
    """
    if not ui.configbool('ui', 'rollback', True):
        # wrap the hint in _() for translation, consistent with every
        # other Abort hint in this module
        raise error.Abort(_('rollback is disabled because it is unsafe'),
                          hint=_('see `hg help -v rollback` for information'))
    # the repository object implements the actual undo; --dry-run only
    # reports what would happen, --force skips the checked-out safety check
    return repo.rollback(dryrun=opts.get('dry_run'),
                         force=opts.get('force'))
6429 6430
@command('root', [])
def root(ui, repo):
    """print the root (top) of the current working directory

    Print the root directory of the current repository.

    Returns 0 on success.
    """
    # Emit the absolute repository root, newline-terminated.
    ui.write("%s\n" % repo.root)
6439 6440
@command('^serve',
    [('A', 'accesslog', '', _('name of access log file to write to'),
     _('FILE')),
    ('d', 'daemon', None, _('run server in background')),
    ('', 'daemon-postexec', [], _('used internally by daemon mode')),
    ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
    # use string type, then we can check if something was passed
    ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
    ('a', 'address', '', _('address to listen on (default: all interfaces)'),
     _('ADDR')),
    ('', 'prefix', '', _('prefix path to serve from (default: server root)'),
     _('PREFIX')),
    ('n', 'name', '',
     _('name to show in web pages (default: working directory)'), _('NAME')),
    ('', 'web-conf', '',
     _('name of the hgweb config file (see "hg help hgweb")'), _('FILE')),
    ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
     _('FILE')),
    ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
    ('', 'stdio', None, _('for remote clients')),
    ('', 'cmdserver', '', _('for remote clients'), _('MODE')),
    ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
    ('', 'style', '', _('template style to use'), _('STYLE')),
    ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
    ('', 'certificate', '', _('SSL certificate file'), _('FILE'))],
    _('[OPTION]...'),
    optionalrepo=True)
def serve(ui, repo, **opts):
    """start stand-alone webserver

    Start a local HTTP repository browser and pull server. You can use
    this for ad-hoc sharing and browsing of repositories. It is
    recommended to use a real web server to serve a repository for
    longer periods of time.

    Please note that the server does not implement access control.
    This means that, by default, anybody can read from the server and
    nobody can write to it by default. Set the ``web.allow_push``
    option to ``*`` to allow everybody to push to the server. You
    should use a real web server if you need to authenticate users.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A/--accesslog and -E/--errorlog options to log to
    files.

    To have the server choose a free port number to listen on, specify
    a port number of 0; in this case, the server will print the port
    number it uses.

    Returns 0 on success.
    """
    stdio = opts["stdio"]
    cmdserver = opts["cmdserver"]
    # The two "remote client" transports are mutually exclusive.
    if stdio and cmdserver:
        raise error.Abort(_("cannot use --stdio with --cmdserver"))

    if stdio:
        # --stdio serves a single repository over the ssh wire protocol
        # and never returns (serve_forever loops until the peer hangs up).
        if repo is None:
            raise error.RepoError(_("there is no Mercurial repository here"
                                    " (.hg not found)"))
        sshsrv = sshserver.sshserver(ui, repo)
        sshsrv.serve_forever()

    # Otherwise run either a command server or the hgweb HTTP server.
    if cmdserver:
        service = commandserver.createservice(ui, repo, opts)
    else:
        service = hgweb.createservice(ui, repo, opts)
    return cmdutil.service(opts, initfn=service.init, runfn=service.run)
6507 6508
@command('^status|st',
    [('A', 'all', None, _('show status of all files')),
    ('m', 'modified', None, _('show only modified files')),
    ('a', 'added', None, _('show only added files')),
    ('r', 'removed', None, _('show only removed files')),
    ('d', 'deleted', None, _('show only deleted (but tracked) files')),
    ('c', 'clean', None, _('show only files without changes')),
    ('u', 'unknown', None, _('show only unknown (not tracked) files')),
    ('i', 'ignored', None, _('show only ignored files')),
    ('n', 'no-status', None, _('hide status prefix')),
    ('C', 'copies', None, _('show source of copied files')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('', 'rev', [], _('show difference from revision'), _('REV')),
    ('', 'change', '', _('list the changed files of a revision'), _('REV')),
    ] + walkopts + subrepoopts + formatteropts,
    _('[OPTION]... [FILE]...'),
    inferrepo=True)
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    the source of a copy/move operation, are not listed unless
    -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
    Unless options described with "show only ..." are given, the
    options -mardu are used.

    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/--ignored.

    .. note::

       :hg:`status` may appear to disagree with diff if permissions have
       changed or a merge has occurred. The standard diff format does
       not report permission changes and diff only reports changes
       relative to one merge parent.

    If one revision is given, it is used as the base revision.
    If two revisions are given, the differences between them are
    shown. The --change option can also be used as a shortcut to list
    the changed files of a revision from its first parent.

    The codes used to show the status of files are::

      M = modified
      A = added
      R = removed
      C = clean
      ! = missing (deleted by non-hg command, but still tracked)
      ? = not tracked
      I = ignored
        = origin of the previous file (with --copies)

    .. container:: verbose

      Examples:

      - show changes in the working directory relative to a
        changeset::

          hg status --rev 9353

      - show changes in the working directory relative to the
        current directory (see :hg:`help patterns` for more information)::

          hg status re:

      - show all changes including copies in an existing changeset::

          hg status --copies --change 9353

      - get a NUL separated list of added files, suitable for xargs::

          hg status -an0

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    # Resolve the two comparison nodes: --change compares a revision
    # against its first parent, otherwise --rev (0, 1 or 2 revisions)
    # picks the pair via revpair.
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise error.Abort(msg)
    elif change:
        node2 = scmutil.revsingle(repo, change, None).node()
        node1 = repo[node2].p1().node()
    else:
        node1, node2 = scmutil.revpair(repo, revs)

    # Paths are printed relative to cwd only when patterns were given.
    if pats:
        cwd = repo.getcwd()
    else:
        cwd = ''

    if opts.get('print0'):
        end = '\0'
    else:
        end = '\n'
    copy = {}
    states = 'modified added removed deleted unknown ignored clean'.split()
    # Which state buckets to display, from the "show only ..." flags;
    # -A/--all adds everything (quiet mode still elides unknown/ignored),
    # and with no flags the default is -mardu (or -mard when quiet).
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        if ui.quiet:
            show = states[:4]
        else:
            show = states[:5]

    m = scmutil.match(repo[node2], pats, opts)
    stat = repo.status(node1, node2, m,
                       'ignored' in show, 'clean' in show, 'unknown' in show,
                       opts.get('subrepos'))
    # Pair each state name with its one-letter status code and file list.
    changestates = zip(states, 'MAR!?IC', stat)

    if (opts.get('all') or opts.get('copies')
        or ui.configbool('ui', 'statuscopies')) and not opts.get('no_status'):
        copy = copies.pathcopies(repo[node1], repo[node2], m)

    fm = ui.formatter('status', opts)
    fmt = '%s' + end
    showchar = not opts.get('no_status')

    for state, char, files in changestates:
        if state in show:
            label = 'status.' + state
            for f in files:
                fm.startitem()
                fm.condwrite(showchar, 'status', '%s ', char, label=label)
                fm.write('path', fmt, repo.pathto(f, cwd), label=label)
                # Copy/rename origin line, shown indented under the file.
                if f in copy:
                    fm.write("copy", ' %s' + end, repo.pathto(copy[f], cwd),
                             label='status.copied')
    fm.end()
6643 6644
@command('^summary|sum',
    [('', 'remote', None, _('check for push and pull'))], '[--remote]')
def summary(ui, repo, **opts):
    """summarize working directory state

    This generates a brief summary of the working directory state,
    including parents, branch, commit status, phase and available updates.

    With the --remote option, this will check the default paths for
    incoming and outgoing changes. This can be time-consuming.

    Returns 0 on success.
    """

    ctx = repo[None]
    parents = ctx.parents()
    pnode = parents[0].node()
    marks = []

    # Read merge state to count unresolved files; unsupported merge
    # records only produce a warning (the summary must still print).
    ms = None
    try:
        ms = mergemod.mergestate.read(repo)
    except error.UnsupportedMergeRecords as e:
        s = ' '.join(e.recordtypes)
        ui.warn(
            _('warning: merge state has unsupported record types: %s\n') % s)
        unresolved = 0
    else:
        unresolved = [f for f in ms if ms[f] == 'u']

    # "parent:" line(s), one per working directory parent.
    for p in parents:
        # label with log.changeset (instead of log.parent) since this
        # shows a working directory parent *changeset*:
        # i18n: column positioning for "hg summary"
        ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
                 label='log.changeset changeset.%s' % p.phasestr())
        ui.write(' '.join(p.tags()), label='log.tag')
        if p.bookmarks():
            marks.extend(p.bookmarks())
        if p.rev() == -1:
            if not len(repo):
                ui.write(_(' (empty repository)'))
            else:
                ui.write(_(' (no revision checked out)'))
        ui.write('\n')
        if p.description():
            ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
                      label='log.summary')

    branch = ctx.branch()
    bheads = repo.branchheads(branch)
    # i18n: column positioning for "hg summary"
    m = _('branch: %s\n') % branch
    # Non-default branch names are always shown; 'default' only verbosely.
    if branch != 'default':
        ui.write(m, label='log.branch')
    else:
        ui.status(m, label='log.branch')

    # "bookmarks:" line; the active bookmark is starred (or bracketed
    # when it does not point at a parent).
    if marks:
        active = repo._activebookmark
        # i18n: column positioning for "hg summary"
        ui.write(_('bookmarks:'), label='log.bookmark')
        if active is not None:
            if active in marks:
                ui.write(' *' + active, label=activebookmarklabel)
                marks.remove(active)
            else:
                ui.write(' [%s]' % active, label=activebookmarklabel)
        for m in marks:
            ui.write(' ' + m, label='log.bookmark')
        ui.write('\n', label='log.bookmark')

    status = repo.status(unknown=True)

    # Reclassify dirstate copies: a copy whose source was removed is a
    # rename; drop its destination from the "added" bucket either way.
    c = repo.dirstate.copies()
    copied, renamed = [], []
    for d, s in c.iteritems():
        if s in status.removed:
            status.removed.remove(s)
            renamed.append(d)
        else:
            copied.append(d)
        if d in status.added:
            status.added.remove(d)

    subs = [s for s in ctx.substate if ctx.sub(s).dirty()]

    # Build the "commit:" line from whichever buckets are non-empty.
    labels = [(ui.label(_('%d modified'), 'status.modified'), status.modified),
              (ui.label(_('%d added'), 'status.added'), status.added),
              (ui.label(_('%d removed'), 'status.removed'), status.removed),
              (ui.label(_('%d renamed'), 'status.copied'), renamed),
              (ui.label(_('%d copied'), 'status.copied'), copied),
              (ui.label(_('%d deleted'), 'status.deleted'), status.deleted),
              (ui.label(_('%d unknown'), 'status.unknown'), status.unknown),
              (ui.label(_('%d unresolved'), 'resolve.unresolved'), unresolved),
              (ui.label(_('%d subrepos'), 'status.modified'), subs)]
    t = []
    for l, s in labels:
        if s:
            t.append(l % len(s))

    t = ', '.join(t)
    cleanworkdir = False

    # Append a parenthesized working-directory state note.
    if repo.vfs.exists('graftstate'):
        t += _(' (graft in progress)')
    if repo.vfs.exists('updatestate'):
        t += _(' (interrupted update)')
    elif len(parents) > 1:
        t += _(' (merge)')
    elif branch != parents[0].branch():
        t += _(' (new branch)')
    elif (parents[0].closesbranch() and
          pnode in repo.branchheads(branch, closed=True)):
        t += _(' (head closed)')
    elif not (status.modified or status.added or status.removed or renamed or
              copied or subs):
        t += _(' (clean)')
        cleanworkdir = True
    elif pnode not in bheads:
        t += _(' (new branch head)')

    if parents:
        pendingphase = max(p.phase() for p in parents)
    else:
        pendingphase = phases.public

    if pendingphase > phases.newcommitphase(ui):
        t += ' (%s)' % phases.phasenames[pendingphase]

    if cleanworkdir:
        # i18n: column positioning for "hg summary"
        ui.status(_('commit: %s\n') % t.strip())
    else:
        # i18n: column positioning for "hg summary"
        ui.write(_('commit: %s\n') % t.strip())

    # all ancestors of branch heads - all ancestors of parent = new csets
    new = len(repo.changelog.findmissing([pctx.node() for pctx in parents],
                                         bheads))

    if new == 0:
        # i18n: column positioning for "hg summary"
        ui.status(_('update: (current)\n'))
    elif pnode not in bheads:
        # i18n: column positioning for "hg summary"
        ui.write(_('update: %d new changesets (update)\n') % new)
    else:
        # i18n: column positioning for "hg summary"
        ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
                 (new, len(bheads)))

    # "phases:" line, only when draft/secret changesets exist.
    t = []
    draft = len(repo.revs('draft()'))
    if draft:
        t.append(_('%d draft') % draft)
    secret = len(repo.revs('secret()'))
    if secret:
        t.append(_('%d secret') % secret)

    if draft or secret:
        ui.status(_('phases: %s\n') % ', '.join(t))

    # Troubled changeset counts (only meaningful with obsolescence markers).
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        for trouble in ("unstable", "divergent", "bumped"):
            numtrouble = len(repo.revs(trouble + "()"))
            # We write all the possibilities to ease translation
            troublemsg = {
                "unstable": _("unstable: %d changesets"),
                "divergent": _("divergent: %d changesets"),
                "bumped": _("bumped: %d changesets"),
            }
            if numtrouble > 0:
                ui.status(troublemsg[trouble] % numtrouble + "\n")

    cmdutil.summaryhooks(ui, repo)

    # Decide whether remote discovery is needed: --remote forces both;
    # otherwise registered remote hooks may request incoming/outgoing.
    if opts.get('remote'):
        needsincoming, needsoutgoing = True, True
    else:
        needsincoming, needsoutgoing = False, False
        for i, o in cmdutil.summaryremotehooks(ui, repo, opts, None):
            if i:
                needsincoming = True
            if o:
                needsoutgoing = True
        if not needsincoming and not needsoutgoing:
            return

    def getincoming():
        # Discover incoming changesets from the 'default' path; returns
        # (source, branch, peer, commoninc, incoming-heads).
        source, branches = hg.parseurl(ui.expandpath('default'))
        sbranch = branches[0]
        try:
            other = hg.peer(repo, {}, source)
        except error.RepoError:
            if opts.get('remote'):
                raise
            return source, sbranch, None, None, None
        revs, checkout = hg.addbranchrevs(repo, other, branches, None)
        if revs:
            revs = [other.lookup(rev) for rev in revs]
        ui.debug('comparing with %s\n' % util.hidepassword(source))
        repo.ui.pushbuffer()
        commoninc = discovery.findcommonincoming(repo, other, heads=revs)
        repo.ui.popbuffer()
        return source, sbranch, other, commoninc, commoninc[1]

    if needsincoming:
        source, sbranch, sother, commoninc, incoming = getincoming()
    else:
        source = sbranch = sother = commoninc = incoming = None

    def getoutgoing():
        # Discover outgoing changesets toward 'default-push'/'default',
        # reusing the incoming peer/common set when the URLs match.
        dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
        dbranch = branches[0]
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
        if source != dest:
            try:
                dother = hg.peer(repo, {}, dest)
            except error.RepoError:
                if opts.get('remote'):
                    raise
                return dest, dbranch, None, None
            ui.debug('comparing with %s\n' % util.hidepassword(dest))
        elif sother is None:
            # there is no explicit destination peer, but source one is invalid
            return dest, dbranch, None, None
        else:
            dother = sother
        if (source != dest or (sbranch is not None and sbranch != dbranch)):
            common = None
        else:
            common = commoninc
        if revs:
            revs = [repo.lookup(rev) for rev in revs]
        repo.ui.pushbuffer()
        outgoing = discovery.findcommonoutgoing(repo, dother, onlyheads=revs,
                                                commoninc=common)
        repo.ui.popbuffer()
        return dest, dbranch, dother, outgoing

    if needsoutgoing:
        dest, dbranch, dother, outgoing = getoutgoing()
    else:
        dest = dbranch = dother = outgoing = None

    # "remote:" line summarizing incoming/outgoing changesets/bookmarks.
    if opts.get('remote'):
        t = []
        if incoming:
            t.append(_('1 or more incoming'))
        o = outgoing.missing
        if o:
            t.append(_('%d outgoing') % len(o))
        other = dother or sother
        if 'bookmarks' in other.listkeys('namespaces'):
            counts = bookmarks.summary(repo, other)
            if counts[0] > 0:
                t.append(_('%d incoming bookmarks') % counts[0])
            if counts[1] > 0:
                t.append(_('%d outgoing bookmarks') % counts[1])

        if t:
            # i18n: column positioning for "hg summary"
            ui.write(_('remote: %s\n') % (', '.join(t)))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('remote: (synced)\n'))

    cmdutil.summaryremotehooks(ui, repo, opts,
                               ((source, sbranch, sother, commoninc),
                                (dest, dbranch, dother, outgoing)))
6915 6916
@command('tag',
    [('f', 'force', None, _('force tag')),
    ('l', 'local', None, _('make the tag local')),
    ('r', 'rev', '', _('revision to tag'), _('REV')),
    ('', 'remove', None, _('remove a tag')),
    # -l/--local is already there, commitopts cannot be used
    ('e', 'edit', None, _('invoke editor on commit messages')),
    ('m', 'message', '', _('use text as commit message'), _('TEXT')),
    ] + commitopts2,
    _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc. Changing
    an existing tag is normally disallowed; use -f/--force to override.

    If no revision is given, the parent of the working directory is
    used.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed similarly
    to other project files and can be hand-edited if necessary. This
    also means that tagging creates a new commit. The file
    ".hg/localtags" is used for local tags (not shared among
    repositories).

    Tag commits are usually made at the head of a branch. If the parent
    of the working directory is not a branch head, :hg:`tag` aborts; use
    -f/--force to force the tag commit to be based on a non-head
    changeset.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Since tag names have priority over branch names during revision
    lookup, using an existing branch name as a tag name is discouraged.

    Returns 0 on success.
    """
    # Tagging (non-local) creates a commit, so take both wlock and lock.
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        # Default target: the working directory parent.
        rev_ = "."
        names = [t.strip() for t in (name1,) + names]
        if len(names) != len(set(names)):
            raise error.Abort(_('tag names must be unique'))
        for n in names:
            scmutil.checknewlabel(repo, n, 'tag')
            if not n:
                raise error.Abort(_('tag names cannot consist entirely of '
                                    'whitespace'))
        if opts.get('rev') and opts.get('remove'):
            raise error.Abort(_("--rev and --remove are incompatible"))
        if opts.get('rev'):
            rev_ = opts['rev']
        message = opts.get('message')
        if opts.get('remove'):
            # Removal requires every name to be an existing tag of the
            # matching kind (local vs global); removing means re-tagging
            # the null revision.
            if opts.get('local'):
                expectedtype = 'local'
            else:
                expectedtype = 'global'

            for n in names:
                if not repo.tagtype(n):
                    raise error.Abort(_("tag '%s' does not exist") % n)
                if repo.tagtype(n) != expectedtype:
                    if expectedtype == 'global':
                        raise error.Abort(_("tag '%s' is not a global tag") % n)
                    else:
                        raise error.Abort(_("tag '%s' is not a local tag") % n)
            rev_ = 'null'
            if not message:
                # we don't translate commit messages
                message = 'Removed tag %s' % ', '.join(names)
        elif not opts.get('force'):
            for n in names:
                if n in repo.tags():
                    raise error.Abort(_("tag '%s' already exists "
                                        "(use -f to force)") % n)
        if not opts.get('local'):
            # Global tags commit to .hgtags: refuse during an uncommitted
            # merge, and (without --force) when not at a branch head.
            p1, p2 = repo.dirstate.parents()
            if p2 != nullid:
                raise error.Abort(_('uncommitted merge'))
            bheads = repo.branchheads()
            if not opts.get('force') and bheads and p1 not in bheads:
                raise error.Abort(_('not at a branch head (use -f to force)'))
        r = scmutil.revsingle(repo, rev_).node()

        if not message:
            # we don't translate commit messages
            message = ('Added tag %s for changeset %s' %
                       (', '.join(names), short(r)))

        date = opts.get('date')
        if date:
            date = util.parsedate(date)

        if opts.get('remove'):
            editform = 'tag.remove'
        else:
            editform = 'tag.add'
        editor = cmdutil.getcommiteditor(editform=editform, **opts)

        # don't allow tagging the null rev
        if (not opts.get('remove') and
            scmutil.revsingle(repo, rev_).rev() == nullrev):
            raise error.Abort(_("cannot tag null revision"))

        repo.tag(names, r, message, opts.get('local'), opts.get('user'), date,
                 editor=editor)
    finally:
        release(lock, wlock)
7032 7033
@command('tags', formatteropts, '')
def tags(ui, repo, **opts):
    """list repository tags

    This lists both regular and local tags. When the -v/--verbose
    switch is used, a third column "local" is printed for local tags.
    When the -q/--quiet switch is used, only the tag name is printed.

    Returns 0 on success.
    """

    fm = ui.formatter('tags', opts)
    hexfunc = fm.hexfunc
    tagtype = ""

    # Walk tags newest-first; tagslist() is oldest-first, hence reversed().
    for name, node in reversed(repo.tagslist()):
        hexnode = hexfunc(node)
        if repo.tagtype(name) == 'local':
            label = 'tags.local'
            tagtype = 'local'
        else:
            label = 'tags.normal'
            tagtype = ''

        fm.startitem()
        fm.write('tag', '%s', name, label=label)
        # Pad so the rev:node column lines up at column 30.
        fmt = " " * (30 - encoding.colwidth(name)) + ' %5d:%s'
        fm.condwrite(not ui.quiet, 'rev node', fmt,
                     repo.changelog.rev(node), hexnode, label=label)
        fm.condwrite(ui.verbose and tagtype, 'type', ' %s',
                     tagtype, label=label)
        fm.plain('\n')
    fm.end()
7065 7066
@command('tip',
    [('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ] + templateopts,
    _('[-p] [-g]'))
def tip(ui, repo, **opts):
    """show the tip revision (DEPRECATED)

    The tip revision (usually just called the tip) is the changeset
    most recently added to the repository (and therefore the most
    recently changed head).

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.

    This command is deprecated, please use :hg:`heads` instead.

    Returns 0 on success.
    """
    # Render the single 'tip' changeset with the standard log displayer.
    shower = cmdutil.show_changeset(ui, repo, opts)
    shower.show(repo['tip'])
    shower.close()
7090 7091
@command('unbundle',
    [('u', 'update', None,
     _('update to new branch head if changesets were unbundled'))],
    _('[-u] FILE...'))
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files

    Apply one or more compressed changegroup files generated by the
    bundle command.

    Returns 0 on success, 1 if an update has unresolved files.
    """
    fnames = (fname1,) + fnames

    with repo.lock():
        for fname in fnames:
            f = hg.openpath(ui, fname)
            gen = exchange.readbundle(ui, f, fname)
            if isinstance(gen, bundle2.unbundle20):
                # Bundle2: apply inside a transaction, translating an
                # unknown-feature error into a user-facing abort.
                tr = repo.transaction('unbundle')
                try:
                    op = bundle2.applybundle(repo, gen, tr, source='unbundle',
                                             url='bundle:' + fname)
                    tr.close()
                except error.BundleUnknownFeatureError as exc:
                    raise error.Abort(_('%s: unknown bundle feature, %s')
                                      % (fname, exc),
                                      hint=_("see https://mercurial-scm.org/"
                                             "wiki/BundleFeature for more "
                                             "information"))
                finally:
                    if tr:
                        tr.release()
                # Collapse the per-changegroup results into one head count.
                changes = [r.get('return', 0)
                           for r in op.records['changegroup']]
                modheads = changegroup.combineresults(changes)
            elif isinstance(gen, streamclone.streamcloneapplier):
                # Stream clone bundles bypass changegroup application.
                raise error.Abort(
                    _('packed bundles cannot be applied with '
                      '"hg unbundle"'),
                    hint=_('use "hg debugapplystreamclonebundle"'))
            else:
                # Legacy changegroup (cg1/cg2) bundle.
                modheads = gen.apply(repo, 'unbundle', 'bundle:' + fname)

    return postincoming(ui, repo, modheads, opts.get('update'), None, None)
7136 7137
@command('^update|up|checkout|co',
    [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
    ('c', 'check', None, _('require clean working directory')),
    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
    ('r', 'rev', '', _('revision'), _('REV'))
    ] + mergetoolopts,
    _('[-c] [-C] [-d DATE] [[-r] REV]'))
def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False,
           tool=None):
    """update working directory (or switch revisions)

    Update the repository's working directory to the specified
    changeset. If no changeset is specified, update to the tip of the
    current named branch and move the active bookmark (see :hg:`help
    bookmarks`).

    Update sets the working directory's parent revision to the specified
    changeset (see :hg:`help parents`).

    If the changeset is not a descendant or ancestor of the working
    directory's parent, the update is aborted. With the -c/--check
    option, the working directory is checked for uncommitted changes; if
    none are found, the working directory is updated to the specified
    changeset.

    .. container:: verbose

      The following rules apply when the working directory contains
      uncommitted changes:

      1. If neither -c/--check nor -C/--clean is specified, and if
         the requested changeset is an ancestor or descendant of
         the working directory's parent, the uncommitted changes
         are merged into the requested changeset and the merged
         result is left uncommitted. If the requested changeset is
         not an ancestor or descendant (that is, it is on another
         branch), the update is aborted and the uncommitted changes
         are preserved.

      2. With the -c/--check option, the update is aborted and the
         uncommitted changes are preserved.

      3. With the -C/--clean option, uncommitted changes are discarded and
         the working directory is updated to the requested changeset.

    To cancel an uncommitted merge (and lose your changes), use
    :hg:`update --clean .`.

    Use null as the changeset to remove the working directory (like
    :hg:`clone -U`).

    If you want to revert just one file to an older revision, use
    :hg:`revert [-r REV] NAME`.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if there are unresolved files.
    """
    # The revision may arrive positionally (node) or via -r (rev);
    # giving both is an error, and a positional one is folded into rev.
    if rev and node:
        raise error.Abort(_("please specify just one revision"))

    if rev is None or rev == '':
        rev = node

    if date and rev is not None:
        raise error.Abort(_("you can't specify a revision and a date"))

    if check and clean:
        raise error.Abort(_("cannot specify both -c/--check and -C/--clean"))

    with repo.wlock():
        cmdutil.clearunfinished(repo)

        if date:
            rev = cmdutil.finddate(ui, repo, date)

        # if we defined a bookmark, we have to remember the original name
        brev = rev
        rev = scmutil.revsingle(repo, rev, rev).rev()

        if check:
            cmdutil.bailifchanged(repo, merge=False)

        repo.ui.setconfig('ui', 'forcemerge', tool, 'update')

        return hg.updatetotally(ui, repo, rev, brev, clean=clean, check=check)
7223 7224
@command('verify', [])
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.

    Please see https://mercurial-scm.org/wiki/RepositoryCorruption
    for more information about recovery from corruption of the
    repository.

    Returns 0 on success, 1 if errors are encountered.
    """
    # All verification logic lives in hg.verify; propagate its exit code.
    return hg.verify(repo)
7242 7243
@command('version', [], norepo=True)
def version_(ui):
    """output version and copyright information"""
    ui.write(_("Mercurial Distributed SCM (version %s)\n")
             % util.version())
    ui.status(_(
        "(see https://mercurial-scm.org for more information)\n"
        "\nCopyright (C) 2005-2016 Matt Mackall and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    ))

    ui.note(_("\nEnabled extensions:\n\n"))
    if ui.verbose:
        # format names and versions into columns
        rows = []
        for extname, extmod in extensions.extensions():
            if extensions.ismoduleinternal(extmod):
                origin = _("internal")
            else:
                origin = _("external")
            rows.append((extname, origin, extensions.moduleversion(extmod)))
        if rows:
            namewidth = max(len(row[0]) for row in rows)
            for extname, origin, extver in rows:
                ui.write(" %-*s %s %s\n" %
                         (namewidth, extname, origin, extver))
7274 7275
def loadcmdtable(ui, name, cmdtable):
    """Load command functions from specified cmdtable
    """
    # Warn when an extension shadows commands already in the core table.
    clashing = []
    for cmd in cmdtable:
        if cmd in table:
            clashing.append(cmd)
    if clashing:
        ui.warn(_("extension '%s' overrides commands: %s\n")
                % (name, " ".join(clashing)))
    table.update(cmdtable)
@@ -1,1941 +1,1942 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 base85,
20 20 bookmarks as bookmod,
21 21 bundle2,
22 22 changegroup,
23 23 discovery,
24 24 error,
25 25 lock as lockmod,
26 26 obsolete,
27 27 phases,
28 28 pushkey,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 tags,
33 33 url as urlmod,
34 34 util,
35 35 )
36 36
37 37 urlerr = util.urlerr
38 38 urlreq = util.urlreq
39 39
40 40 # Maps bundle compression human names to internal representation.
41 41 _bundlespeccompressions = {'none': None,
42 42 'bzip2': 'BZ',
43 43 'gzip': 'GZ',
44 44 }
45 45
46 46 # Maps bundle version human names to changegroup versions.
47 47 _bundlespeccgversions = {'v1': '01',
48 48 'v2': '02',
49 49 'packed1': 's1',
50 50 'bundle2': '02', #legacy
51 51 }
52 52
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpretted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k1=v1;k2=v2" into ('<version>', {'k1': 'v1', ...}).
        # Keys and values are URI decoded.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully qualified form: "<compression>-<version>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in _bundlespeccompressions:
            # Compression only: default the version from repo capabilities.
            compression = spec
            version = 'v1'
            if 'generaldelta' in repo.requirements:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            # Version only: default the compression per version.
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Map human names ('gzip', 'v2') to internal ids ('GZ', '02').
        compression = _bundlespeccompressions[compression]
        version = _bundlespeccgversions[version]
    return compression, version, params
163 163
def readbundle(ui, fh, fname, vfs=None):
    """Sniff a bundle's 4-byte magic and return the matching unpacker."""
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A raw headerless stream begins with a NUL chunk; wrap it so it
        # parses as an uncompressed HG10 bundle.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # cg1 bundles carry a two-byte compression tag right after the
        # magic, unless the headerless fixup above already chose one.
        return changegroup.cg1unpacker(
            fh, alg if alg is not None else changegroup.readexactly(fh, 2))
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
191 191
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a bundlespec string such as ``bzip2-v1`` or
    ``none-packed1;requirements=...``.  Raises ``error.Abort`` when the
    bundle's compression, changegroup version, or overall type cannot be
    mapped to a known spec.
    """
    def speccompression(alg):
        # Reverse-map an internal compression id ('BZ', 'GZ', None) to its
        # human name ('bzip2', 'gzip', 'none'); None if unrecognized.
        for k, v in _bundlespeccompressions.items():
            if v == alg:
                return k
        return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            alg = b.params['Compression']
            comp = speccompression(alg)
            if not comp:
                # Report the algorithm we failed to map, not the None that
                # speccompression() just returned.
                raise error.Abort(_('unknown compression algorithm: %s')
                                  % alg)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
244 244
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None
    # Negotiate a marker format both sides understand before encoding.
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler does not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
259 259
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    # developer config: devel.legacy.exchange
    #
    # This config lets developers pick the bundle version used during
    # exchange, which is especially handy in tests.  The value is a list of
    # bundle versions to pick from; the highest listed version wins.
    legacy = op.repo.ui.configlist('devel', 'legacy.exchange')
    forced = 'bundle2' not in legacy and 'bundle1' in legacy
    return forced or not op.remote.capable('bundle2')
275 275
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        """Initialize push state; discovery/result fields start out unset."""
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult is set by the changegroup push step; truthy means the
        # changesets made it to the remote.
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # action -> (success message, failure message), both taking the
    # bookmark name as their single % argument
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
387 387
388 388
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()

    (The function actually returns the pushoperation object; the integer
    described above is available as ``pushop.cgresult``.)
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    # When pushing over the local filesystem, refuse early if the
    # destination cannot represent our repository's requirements.
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # transaction manager collects rollback/close duties for any
            # reply bundle the server may push back to us
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # legacy addchangegroup path requires locking the remote
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if not _forcebundle1(pushop):
                _pushbundle2(pushop)
            # the bundle1 fallbacks below are no-ops for any step that
            # bundle2 already recorded in pushop.stepsdone
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        # release in reverse order of acquisition
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
473 473
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator registering a discovery step to run before a push

    The decorated function is recorded in the step-name -> function mapping
    and its name is appended to the ordered step list, so registration order
    is significant.

    Only use this for brand new steps; to wrap a step provided by an
    extension, mutate the pushdiscovery dictionary directly."""
    def register(discoveryfunc):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = discoveryfunc
        pushdiscoveryorder.append(stepname)
        return discoveryfunc
    return register
497 497
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    # steps run in registration order; each mutates pushop in place
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
503 503
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # First learn what the remote already shares with us...
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # ...then compute the outgoing set on top of that knowledge.
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
516 516
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)

    Fills ``pushop.outdatedphases`` (heads to publish if the changeset push
    succeeds) and ``pushop.fallbackoutdatedphases`` (heads to publish if it
    fails).
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # On non-publishing servers only already-public changesets may move
    # phase, hence the extra 'and public()' clause.
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln  + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
565 565
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect obsolescence markers relevant to the outgoing heads"""
    # bail out unless exchange is enabled, we have markers, and the remote
    # advertises obsolescence support (checked in this short-circuit order)
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return
    if not pushop.repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
576 576
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmarks must be pushed, updated, or deleted

    Fills ``pushop.outbookmarks`` with (name, old-remote-id, new-id) tuples
    and sets ``pushop.bkresult`` to 2 when an explicitly requested bookmark
    exists neither locally nor remotely.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    # Without explicit revs every bookmark is fair game; with revs, only
    # bookmarks pointing into the pushed ancestry are advanced.
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user named on the command line; names are removed from
    # this set as each comparison bucket accounts for them
    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # advanced locally: push the new position (if within pushed ancestry)
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
628 628
def _pushcheckoutgoing(pushop):
    """Validate the outgoing set; return False when there is nothing to push.

    Without --force, aborts when an outgoing head is obsolete or troubled,
    and delegates new-head checking to discovery.checkheads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # an empty obsstore means no obsolescence data at all, so the whole
    # head inspection below can be skipped
    if not pushop.force and unfi.obsstore:
        # messages kept terse because of the 80 character limit
        msgobsolete = _("push includes obsolete changeset: %s!")
        msgtroubled = {
            "unstable": _("push includes unstable changeset: %s!"),
            "bumped": _("push includes bumped changeset: %s!"),
            "divergent": _("push includes divergent changeset: %s!"),
        }
        # if any missing changeset were obsolete or unstable, at least one
        # missing head would be too, so checking heads only is sufficient
        for node in outgoing.missingheads:
            ctx = unfi[node]
            if ctx.obsolete():
                raise error.Abort(msgobsolete % ctx)
            if ctx.troubled():
                raise error.Abort(msgtroubled[ctx.troubles()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
659 659
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generation step

    The decorated function is stored in the step-name -> function mapping and
    its name inserted into the ordered step list: appended at the end, or at
    position ``idx`` when one is given.  Registration order matters.

    Only use this for brand new steps; to wrap a step provided by an
    extension, attack the b2partsgenmapping dictionary directly."""
    def register(partgenfunc):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = partgenfunc
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return partgenfunc
    return register
686 686
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    if pushop.force:
        return
    # ask the server to verify its heads still match what we saw during
    # discovery before it applies the changegroup
    bundler.newpart('check:heads', data=iter(pushop.remoteheads))
694 694
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.

    Returns a reply handler closure (or None when there is nothing to push)
    that extracts the addchangegroup return value from the server reply.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # Negotiate the highest changegroup version both sides support.
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                            pushop.outgoing,
                                            version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        # closes over cgpart so the reply can be matched to our part id
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
735 735
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Emits one pushkey part per outdated head to move it to public, and
    returns a reply handler that warns about ignored or failed updates.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'pushkey' in b2caps:
        return
    pushop.stepsdone.add('phases')
    # (part id, node) pairs so the reply/failure handlers can name the
    # head a given pushkey part was about
    part2node = []

    def handlefailure(pushop, exc):
        # called when the server marks our mandatory pushkey part as failed
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
776 776
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part to the bundle when supported by the remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # skip entirely when no marker format is common with the remote
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
788 788
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Emits one pushkey part per outgoing bookmark and returns a reply handler
    that reports each update/export/delete result via bookmsgmap.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples shared by both handlers
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # called when the server marks our mandatory pushkey part as failed
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old value -> creation; empty new value -> deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
840 840
841 841
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    # run every registered part generator; callable returns are reply
    # handlers to invoke once the server's reply has been processed
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (the replycaps part emitted above always counts for one)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            raise error.Abort(_('push failed on remote'), hint=exc.hint)
    except error.PushkeyFailed as exc:
        # route mandatory pushkey failures to the callback the part
        # generator registered in pushop.pkfailcb
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
888 888
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) changegroup push. Generates a changegroup for
    ``pushop.outgoing`` and transfers it with either the ``unbundle`` or the
    older ``addchangegroup`` wire command, storing the remote's return value
    in ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
935 935
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the remote's observed phase state on the local repo, then
    pushes outdated draft heads to public on the remote via individual
    pushkey calls (the non-bundle2 fallback path).
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
991 991
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        tr = pushop.trmanager.transaction()
        phases.advanceboundary(pushop.repo, tr, phase, nodes)
        return
    # No transaction manager means the repo is not locked: we must not
    # change any phases. Just tell the user which updates were skipped.
    repo = pushop.repo
    phasestr = phases.phasenames[phase]
    skipped = [n for n in nodes if phase < repo[n].phase()]
    if skipped:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
1008 1008
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
1027 1027
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # pick the message pair matching the kind of bookmark change
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery can have set the value to an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1049 1049
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly (names expanded through the local
        # bookmark store so abbreviations resolve before discovery)
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (set by pull(); None until then)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # True unless bundle1 is forced by config or missing remote support
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1120 1120
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr:
            return self._tr
        trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
        tr = self.repo.transaction(trname)
        tr.hookargs['source'] = self.source
        tr.hookargs['url'] = self.url
        self._tr = tr
        return tr

    def close(self):
        """close transaction if created"""
        if self._tr is None:
            return
        self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is None:
            return
        self._tr.release()
1150 1150
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    # local-to-local pulls can detect unsupported requirements up front
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        # Each _pull* step below checks pullop.stepsdone and is a no-op if
        # the bundle2 exchange already covered it.
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release before unlocking: the transaction belongs to the lock scope
        pullop.trmanager.release()
        lock.release()

    return pullop
1205 1205
# list of names of steps to perform discovery before pull, in order
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1213 1213
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # a step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
1229 1229
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # steps run in registration order; this ordering can be significant
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1235 1235
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    # all known bundle2 servers now support listkeys, but lets be nice with
    # new implementation.
    usingb2listkeys = (pullop.canusebundle2
                       and 'listkeys' in pullop.remotebundle2caps)
    if not usingb2listkeys:
        pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1249 1249
1250 1250
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle all
    discovery at some point.

    Stores the results on ``pullop.common``, ``pullop.fetch`` and
    ``pullop.rheads``.
    """
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situation. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # known locally but not in common: locally filtered head
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head was known locally: nothing left to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1288 1288
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Assembles the ``getbundle`` arguments (changegroup, listkeys, clone
    bundle flag, obsmarkers), fetches one bundle2 from the remote and
    processes it, then applies the phase/bookmark listkeys replies.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions tweak the getbundle arguments before the request
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1356 1356
1357 1357 def _pullbundle2extraprepare(pullop, kwargs):
1358 1358 """hook function so that extensions can extend the getbundle call"""
1359 1359 pass
1360 1360
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2) changegroup pull. Picks the best wire command the
    remote supports (``getbundle`` > ``changegroup`` > ``changegroupsubset``)
    and applies the result, storing the return code in ``pullop.cgresult``.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # full pull against an old server: plain changegroup command
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1393 1393
def _pullphase(pullop):
    """fetch phase data from the remote and apply it locally"""
    if 'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1400 1400
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the decoded pushkey 'phases' namespace from the
    remote. Heads the remote considers public are advanced to public
    locally; with a non-publishing remote the remaining pulled heads are
    advanced to draft.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind frequently used lookups to locals for the filtering loops below
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1435 1435
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    localrepo = pullop.repo
    bookmod.updatefromremote(localrepo.ui, localrepo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1447 1447
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' is always present when the remote has any markers
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr
1475 1475
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    # advertise bundle2 support plus the url-quoted capability blob
    return set(['HG20', 'bundle2=' + urlreq.quote(capsblob)])
1482 1482
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1490 1490
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        # a step name may only be registered once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
1509 1509
def bundle2requested(bundlecaps):
    """Tell whether the client capabilities ask for a bundle2 ('HG2x') bundle."""
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
1514 1514
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        # bundle10 can carry nothing but a changegroup, so reject requests
        # that disable it or add extra arguments
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        return changegroup.getchangegroup(repo, source, outgoing,
                                          bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # delegate each part to its registered generator, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1559 1560
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # negotiate the highest version both sides support
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                version=version)

    # cg is None when no changegroup was requested or nothing is outgoing
    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
1587 1588
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'listkeys' part per requested pushkey namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1598 1599
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # only markers relevant to the ancestors of the requested heads are sent
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    buildobsmarkerspart(bundler, markers)
1610 1611
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1653 1654
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    currentheads = repo.heads()
    currenthash = hashlib.sha1(''.join(sorted(currentheads))).digest()
    # The remote may claim 'force' (skip the check), send the literal head
    # list, or send a hash of it ('hashed' form).
    unchanged = (their_heads == ['force']
                 or their_heads == currentheads
                 or their_heads == ['hashed', currenthash])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1667 1668
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced, a PushRaced exception is raised.

    ``cg`` is either a bundle2 stream (detected via its ``params``
    attribute) or a plain changegroup. ``heads`` is the client's recorded
    view of our heads, used for the race check. ``source`` and ``url`` are
    recorded in transaction hook arguments.

    Returns the bundle2 reply (possibly None) for bundle2 input, or the
    result of ``cg.apply`` for plain changegroup input.
    """
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # Abort with PushRaced before touching anything if the repo moved.
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 path: locks/transaction are created lazily on demand
            # by the closure below, so read-only bundles avoid locking.
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    # Capture the reply even when processing failed so any
                    # buffered output can be attached to it below.
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # Tag the exception so callers know it happened during
                # bundle2 processing, and salvage any reply output.
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            # plain changegroup path: a single lock for the whole apply
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
1736 1737
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # Clone bundles only apply when the feature is enabled, the local repo
    # is empty, no specific heads were requested, and the server advertises
    # the capability. Short-circuiting preserves the original evaluation
    # order, so the capability probe only runs when the cheaper checks pass.
    if (not repo.ui.configbool('ui', 'clonebundles', True)
            or len(repo)
            or pullop.heads
            or not remote.capable('clonebundles')):
        return

    manifest = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, manifest)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    usable = filterclonebundleentries(repo, entries)
    if not usable:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    # Best entry per user preferences comes first.
    url = sortclonebundleentries(repo.ui, usable)[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
1800 1801
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        # Skip blank/whitespace-only lines.
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # A bad spec only means we can't derive the extra
                    # attributes; the entry itself is still kept.
                    pass

        entries.append(attrs)

    return entries
1836 1837
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    usable = []
    for entry in entries:
        # Reject entries whose declared bundle spec we cannot handle.
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        # Reject entries requiring SNI when our TLS stack lacks it.
        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        usable.append(entry)

    return usable
1869 1870
def sortclonebundleentries(ui, entries):
    """Sort clone bundle manifest entries by user preferences.

    ``ui.clonebundleprefers`` holds a list of ``KEY=VALUE`` strings. An
    entry whose attribute matches an earlier preference sorts before one
    that doesn't. With no preferences configured, a copy of ``entries`` is
    returned in original order. The sort is stable, so entries that tie on
    every preference keep their manifest order.
    """
    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    # The ``cmp=`` keyword to sorted() exists only on Python 2;
    # functools.cmp_to_key (available since 2.7) is the portable,
    # behavior-identical spelling.
    from functools import cmp_to_key
    return sorted(entries, key=cmp_to_key(compareentry))
1913 1914
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Fetches ``url`` and applies the result inside a single transaction,
    dispatching on the bundle format (bundle2 stream, stream clone, or
    plain changegroup). Returns True on success; HTTP/URL fetch errors are
    reported to the user and result in False (the transaction is released
    un-closed, i.e. rolled back).
    """
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                # Each bundle flavor has its own application entry point.
                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urlerr.httperror as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urlerr.urlerror as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            return False
        finally:
            # release() after close() is a no-op commit; without a prior
            # close() it aborts the transaction.
            tr.release()
    finally:
        lock.release()
@@ -1,259 +1,260 b''
1 1 Create an extension to test bundle2 with multiple changegroups
2 2
3 3 $ cat > bundle2.py <<EOF
4 4 > """
5 5 > """
6 > from mercurial import changegroup, exchange
6 > from mercurial import changegroup, discovery, exchange
7 7 >
8 8 > def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
9 9 > b2caps=None, heads=None, common=None,
10 10 > **kwargs):
11 11 > # Create two changegroups given the common changesets and heads for the
12 12 > # changegroup part we are being requested. Use the parent of each head
13 13 > # in 'heads' as intermediate heads for the first changegroup.
14 14 > intermediates = [repo[r].p1().node() for r in heads]
15 > cg = changegroup.getchangegroup(repo, source, heads=intermediates,
16 > common=common, bundlecaps=bundlecaps)
15 > outgoing = discovery.outgoing(repo, common, intermediates)
16 > cg = changegroup.getchangegroup(repo, source, outgoing,
17 > bundlecaps=bundlecaps)
17 18 > bundler.newpart('output', data='changegroup1')
18 19 > bundler.newpart('changegroup', data=cg.getchunks())
19 > cg = changegroup.getchangegroup(repo, source, heads=heads,
20 > common=common + intermediates,
20 > outgoing = discovery.outgoing(repo, common + intermediates, heads)
21 > cg = changegroup.getchangegroup(repo, source, outgoing,
21 22 > bundlecaps=bundlecaps)
22 23 > bundler.newpart('output', data='changegroup2')
23 24 > bundler.newpart('changegroup', data=cg.getchunks())
24 25 >
25 26 > def _pull(repo, *args, **kwargs):
26 27 > pullop = _orig_pull(repo, *args, **kwargs)
27 28 > repo.ui.write('pullop.cgresult is %d\n' % pullop.cgresult)
28 29 > return pullop
29 30 >
30 31 > _orig_pull = exchange.pull
31 32 > exchange.pull = _pull
32 33 > exchange.getbundle2partsmapping['changegroup'] = _getbundlechangegrouppart
33 34 > EOF
34 35
35 36 $ cat >> $HGRCPATH << EOF
36 37 > [ui]
37 38 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
38 39 > EOF
39 40
40 41 Start with a simple repository with a single commit
41 42
42 43 $ hg init repo
43 44 $ cd repo
44 45 $ cat > .hg/hgrc << EOF
45 46 > [extensions]
46 47 > bundle2=$TESTTMP/bundle2.py
47 48 > EOF
48 49
49 50 $ echo A > A
50 51 $ hg commit -A -m A -q
51 52 $ cd ..
52 53
53 54 Clone
54 55
55 56 $ hg clone -q repo clone
56 57
57 58 Add two linear commits
58 59
59 60 $ cd repo
60 61 $ echo B > B
61 62 $ hg commit -A -m B -q
62 63 $ echo C > C
63 64 $ hg commit -A -m C -q
64 65
65 66 $ cd ../clone
66 67 $ cat >> .hg/hgrc <<EOF
67 68 > [hooks]
68 69 > pretxnchangegroup = sh -c "printenv.py pretxnchangegroup"
69 70 > changegroup = sh -c "printenv.py changegroup"
70 71 > incoming = sh -c "printenv.py incoming"
71 72 > EOF
72 73
73 74 Pull the new commits in the clone
74 75
75 76 $ hg pull
76 77 pulling from $TESTTMP/repo (glob)
77 78 searching for changes
78 79 remote: changegroup1
79 80 adding changesets
80 81 adding manifests
81 82 adding file changes
82 83 added 1 changesets with 1 changes to 1 files
83 84 pretxnchangegroup hook: HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_NODE_LAST=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_PENDING=$TESTTMP/clone HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
84 85 remote: changegroup2
85 86 adding changesets
86 87 adding manifests
87 88 adding file changes
88 89 added 1 changesets with 1 changes to 1 files
89 90 pretxnchangegroup hook: HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_NODE_LAST=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_PENDING=$TESTTMP/clone HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
90 91 changegroup hook: HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_NODE_LAST=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
91 92 incoming hook: HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
92 93 changegroup hook: HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_NODE_LAST=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
93 94 incoming hook: HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
94 95 pullop.cgresult is 1
95 96 (run 'hg update' to get a working copy)
96 97 $ hg update
97 98 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 99 $ hg log -G
99 100 @ 2:f838bfaca5c7 public test C
100 101 |
101 102 o 1:27547f69f254 public test B
102 103 |
103 104 o 0:4a2df7238c3b public test A
104 105
105 106 Add more changesets with multiple heads to the original repository
106 107
107 108 $ cd ../repo
108 109 $ echo D > D
109 110 $ hg commit -A -m D -q
110 111 $ hg up -r 1
111 112 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
112 113 $ echo E > E
113 114 $ hg commit -A -m E -q
114 115 $ echo F > F
115 116 $ hg commit -A -m F -q
116 117 $ hg up -r 1
117 118 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
118 119 $ echo G > G
119 120 $ hg commit -A -m G -q
120 121 $ hg up -r 3
121 122 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
122 123 $ echo H > H
123 124 $ hg commit -A -m H -q
124 125 $ hg log -G
125 126 @ 7:5cd59d311f65 draft test H
126 127 |
127 128 | o 6:1d14c3ce6ac0 draft test G
128 129 | |
129 130 | | o 5:7f219660301f draft test F
130 131 | | |
131 132 | | o 4:8a5212ebc852 draft test E
132 133 | |/
133 134 o | 3:b3325c91a4d9 draft test D
134 135 | |
135 136 o | 2:f838bfaca5c7 draft test C
136 137 |/
137 138 o 1:27547f69f254 draft test B
138 139 |
139 140 o 0:4a2df7238c3b draft test A
140 141
141 142 New heads are reported during transfer and properly accounted for in
142 143 pullop.cgresult
143 144
144 145 $ cd ../clone
145 146 $ hg pull
146 147 pulling from $TESTTMP/repo (glob)
147 148 searching for changes
148 149 remote: changegroup1
149 150 adding changesets
150 151 adding manifests
151 152 adding file changes
152 153 added 2 changesets with 2 changes to 2 files (+1 heads)
153 154 pretxnchangegroup hook: HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e HG_NODE_LAST=8a5212ebc8527f9fb821601504794e3eb11a1ed3 HG_PENDING=$TESTTMP/clone HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
154 155 remote: changegroup2
155 156 adding changesets
156 157 adding manifests
157 158 adding file changes
158 159 added 3 changesets with 3 changes to 3 files (+1 heads)
159 160 pretxnchangegroup hook: HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 HG_NODE_LAST=5cd59d311f6508b8e0ed28a266756c859419c9f1 HG_PENDING=$TESTTMP/clone HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
160 161 changegroup hook: HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e HG_NODE_LAST=8a5212ebc8527f9fb821601504794e3eb11a1ed3 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
161 162 incoming hook: HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
162 163 incoming hook: HG_NODE=8a5212ebc8527f9fb821601504794e3eb11a1ed3 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
163 164 changegroup hook: HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 HG_NODE_LAST=5cd59d311f6508b8e0ed28a266756c859419c9f1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
164 165 incoming hook: HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
165 166 incoming hook: HG_NODE=1d14c3ce6ac0582d2809220d33e8cd7a696e0156 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
166 167 incoming hook: HG_NODE=5cd59d311f6508b8e0ed28a266756c859419c9f1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
167 168 pullop.cgresult is 3
168 169 (run 'hg heads' to see heads, 'hg merge' to merge)
169 170 $ hg log -G
170 171 o 7:5cd59d311f65 public test H
171 172 |
172 173 | o 6:1d14c3ce6ac0 public test G
173 174 | |
174 175 | | o 5:7f219660301f public test F
175 176 | | |
176 177 | | o 4:8a5212ebc852 public test E
177 178 | |/
178 179 o | 3:b3325c91a4d9 public test D
179 180 | |
180 181 @ | 2:f838bfaca5c7 public test C
181 182 |/
182 183 o 1:27547f69f254 public test B
183 184 |
184 185 o 0:4a2df7238c3b public test A
185 186
186 187 Removing a head from the original repository by merging it
187 188
188 189 $ cd ../repo
189 190 $ hg merge -r 6 -q
190 191 $ hg commit -m Merge
191 192 $ echo I > I
192 193 $ hg commit -A -m H -q
193 194 $ hg log -G
194 195 @ 9:9d18e5bd9ab0 draft test H
195 196 |
196 197 o 8:71bd7b46de72 draft test Merge
197 198 |\
198 199 | o 7:5cd59d311f65 draft test H
199 200 | |
200 201 o | 6:1d14c3ce6ac0 draft test G
201 202 | |
202 203 | | o 5:7f219660301f draft test F
203 204 | | |
204 205 +---o 4:8a5212ebc852 draft test E
205 206 | |
206 207 | o 3:b3325c91a4d9 draft test D
207 208 | |
208 209 | o 2:f838bfaca5c7 draft test C
209 210 |/
210 211 o 1:27547f69f254 draft test B
211 212 |
212 213 o 0:4a2df7238c3b draft test A
213 214
214 215 Removed heads are reported during transfer and properly accounted for in
215 216 pullop.cgresult
216 217
217 218 $ cd ../clone
218 219 $ hg pull
219 220 pulling from $TESTTMP/repo (glob)
220 221 searching for changes
221 222 remote: changegroup1
222 223 adding changesets
223 224 adding manifests
224 225 adding file changes
225 226 added 1 changesets with 0 changes to 0 files (-1 heads)
226 227 pretxnchangegroup hook: HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_NODE_LAST=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_PENDING=$TESTTMP/clone HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
227 228 remote: changegroup2
228 229 adding changesets
229 230 adding manifests
230 231 adding file changes
231 232 added 1 changesets with 1 changes to 1 files
232 233 pretxnchangegroup hook: HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_NODE_LAST=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_PENDING=$TESTTMP/clone HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
233 234 changegroup hook: HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_NODE_LAST=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
234 235 incoming hook: HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
235 236 changegroup hook: HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_NODE_LAST=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
236 237 incoming hook: HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
237 238 pullop.cgresult is -2
238 239 (run 'hg update' to get a working copy)
239 240 $ hg log -G
240 241 o 9:9d18e5bd9ab0 public test H
241 242 |
242 243 o 8:71bd7b46de72 public test Merge
243 244 |\
244 245 | o 7:5cd59d311f65 public test H
245 246 | |
246 247 o | 6:1d14c3ce6ac0 public test G
247 248 | |
248 249 | | o 5:7f219660301f public test F
249 250 | | |
250 251 +---o 4:8a5212ebc852 public test E
251 252 | |
252 253 | o 3:b3325c91a4d9 public test D
253 254 | |
254 255 | @ 2:f838bfaca5c7 public test C
255 256 |/
256 257 o 1:27547f69f254 public test B
257 258 |
258 259 o 0:4a2df7238c3b public test A
259 260
@@ -1,590 +1,590 b''
1 1 #require killdaemons
2 2
3 3 Create an extension to test bundle2 remote-changegroup parts
4 4
5 5 $ cat > bundle2.py << EOF
6 6 > """A small extension to test bundle2 remote-changegroup parts.
7 7 >
8 8 > Current bundle2 implementation doesn't provide a way to generate those
9 9 > parts, so they must be created by extensions.
10 10 > """
11 > from mercurial import bundle2, changegroup, exchange, util
11 > from mercurial import bundle2, changegroup, discovery, exchange, util
12 12 >
13 13 > def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
14 14 > b2caps=None, heads=None, common=None,
15 15 > **kwargs):
16 16 > """this function replaces the changegroup part handler for getbundle.
17 17 > It allows to create a set of arbitrary parts containing changegroups
18 18 > and remote-changegroups, as described in a bundle2maker file in the
19 19 > repository .hg/ directory.
20 20 >
21 21 > Each line of that bundle2maker file contain a description of the
22 22 > part to add:
23 23 > - changegroup common_revset heads_revset
24 24 > Creates a changegroup part based, using common_revset and
25 > heads_revset for changegroup.getchangegroup.
25 > heads_revset for outgoing
26 26 > - remote-changegroup url file
27 27 > Creates a remote-changegroup part for a bundle at the given
28 28 > url. Size and digest, as required by the client, are computed
29 29 > from the given file.
30 30 > - raw-remote-changegroup <python expression>
31 31 > Creates a remote-changegroup part with the data given in the
32 32 > python expression as parameters. The python expression is
33 33 > evaluated with eval, and is expected to be a dict.
34 34 > """
35 35 > def newpart(name, data=''):
36 36 > """wrapper around bundler.newpart adding an extra part making the
37 37 > client output information about each processed part"""
38 38 > bundler.newpart('output', data=name)
39 39 > part = bundler.newpart(name, data=data)
40 40 > return part
41 41 >
42 42 > for line in open(repo.join('bundle2maker'), 'r'):
43 43 > line = line.strip()
44 44 > try:
45 45 > verb, args = line.split(None, 1)
46 46 > except ValueError:
47 47 > verb, args = line, ''
48 48 > if verb == 'remote-changegroup':
49 49 > url, file = args.split()
50 50 > bundledata = open(file, 'rb').read()
51 51 > digest = util.digester.preferred(b2caps['digests'])
52 52 > d = util.digester([digest], bundledata)
53 53 > part = newpart('remote-changegroup')
54 54 > part.addparam('url', url)
55 55 > part.addparam('size', str(len(bundledata)))
56 56 > part.addparam('digests', digest)
57 57 > part.addparam('digest:%s' % digest, d[digest])
58 58 > elif verb == 'raw-remote-changegroup':
59 59 > part = newpart('remote-changegroup')
60 60 > for k, v in eval(args).items():
61 61 > part.addparam(k, str(v))
62 62 > elif verb == 'changegroup':
63 63 > _common, heads = args.split()
64 64 > common.extend(repo.lookup(r) for r in repo.revs(_common))
65 65 > heads = [repo.lookup(r) for r in repo.revs(heads)]
66 > cg = changegroup.getchangegroup(repo, 'changegroup',
67 > heads=heads, common=common)
66 > outgoing = discovery.outgoing(repo, common, heads)
67 > cg = changegroup.getchangegroup(repo, 'changegroup', outgoing)
68 68 > newpart('changegroup', cg.getchunks())
69 69 > else:
70 70 > raise Exception('unknown verb')
71 71 >
72 72 > exchange.getbundle2partsmapping['changegroup'] = _getbundlechangegrouppart
73 73 > EOF
74 74
75 75 Start a simple HTTP server to serve bundles
76 76
77 77 $ python "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid
78 78 $ cat dumb.pid >> $DAEMON_PIDS
79 79
80 80 $ cat >> $HGRCPATH << EOF
81 81 > [ui]
82 82 > ssh=python "$TESTDIR/dummyssh"
83 83 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
84 84 > EOF
85 85
86 86 $ hg init repo
87 87
88 88 $ hg -R repo unbundle $TESTDIR/bundles/rebase.hg
89 89 adding changesets
90 90 adding manifests
91 91 adding file changes
92 92 added 8 changesets with 7 changes to 7 files (+2 heads)
93 93 (run 'hg heads' to see heads, 'hg merge' to merge)
94 94
95 95 $ hg -R repo log -G
96 96 o 7:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> H
97 97 |
98 98 | o 6:eea13746799a draft Nicolas Dumazet <nicdumz.commits@gmail.com> G
99 99 |/|
100 100 o | 5:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
101 101 | |
102 102 | o 4:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
103 103 |/
104 104 | o 3:32af7686d403 draft Nicolas Dumazet <nicdumz.commits@gmail.com> D
105 105 | |
106 106 | o 2:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> C
107 107 | |
108 108 | o 1:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> B
109 109 |/
110 110 o 0:cd010b8cd998 draft Nicolas Dumazet <nicdumz.commits@gmail.com> A
111 111
112 112 $ hg clone repo orig
113 113 updating to branch default
114 114 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
115 115
116 116 $ cat > repo/.hg/hgrc << EOF
117 117 > [extensions]
118 118 > bundle2=$TESTTMP/bundle2.py
119 119 > EOF
120 120
121 121 Test a pull with an remote-changegroup
122 122
123 123 $ hg bundle -R repo --type v1 --base '0:4' -r '5:7' bundle.hg
124 124 3 changesets found
125 125 $ cat > repo/.hg/bundle2maker << EOF
126 126 > remote-changegroup http://localhost:$HGPORT/bundle.hg bundle.hg
127 127 > EOF
128 128 $ hg clone orig clone -r 3 -r 4
129 129 adding changesets
130 130 adding manifests
131 131 adding file changes
132 132 added 5 changesets with 5 changes to 5 files (+1 heads)
133 133 updating to branch default
134 134 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
135 135 $ hg pull -R clone ssh://user@dummy/repo
136 136 pulling from ssh://user@dummy/repo
137 137 searching for changes
138 138 remote: remote-changegroup
139 139 adding changesets
140 140 adding manifests
141 141 adding file changes
142 142 added 3 changesets with 2 changes to 2 files (+1 heads)
143 143 (run 'hg heads .' to see heads, 'hg merge' to merge)
144 144 $ hg -R clone log -G
145 145 o 7:02de42196ebe public Nicolas Dumazet <nicdumz.commits@gmail.com> H
146 146 |
147 147 | o 6:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> G
148 148 |/|
149 149 o | 5:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
150 150 | |
151 151 | o 4:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
152 152 |/
153 153 | @ 3:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> D
154 154 | |
155 155 | o 2:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> C
156 156 | |
157 157 | o 1:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> B
158 158 |/
159 159 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
160 160
161 161 $ rm -rf clone
162 162
163 163 Test a pull with an remote-changegroup and a following changegroup
164 164
165 165 $ hg bundle -R repo --type v1 --base 2 -r '3:4' bundle2.hg
166 166 2 changesets found
167 167 $ cat > repo/.hg/bundle2maker << EOF
168 168 > remote-changegroup http://localhost:$HGPORT/bundle2.hg bundle2.hg
169 169 > changegroup 0:4 5:7
170 170 > EOF
171 171 $ hg clone orig clone -r 2
172 172 adding changesets
173 173 adding manifests
174 174 adding file changes
175 175 added 3 changesets with 3 changes to 3 files
176 176 updating to branch default
177 177 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
178 178 $ hg pull -R clone ssh://user@dummy/repo
179 179 pulling from ssh://user@dummy/repo
180 180 searching for changes
181 181 remote: remote-changegroup
182 182 adding changesets
183 183 adding manifests
184 184 adding file changes
185 185 added 2 changesets with 2 changes to 2 files (+1 heads)
186 186 remote: changegroup
187 187 adding changesets
188 188 adding manifests
189 189 adding file changes
190 190 added 3 changesets with 2 changes to 2 files (+1 heads)
191 191 (run 'hg heads' to see heads, 'hg merge' to merge)
192 192 $ hg -R clone log -G
193 193 o 7:02de42196ebe public Nicolas Dumazet <nicdumz.commits@gmail.com> H
194 194 |
195 195 | o 6:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> G
196 196 |/|
197 197 o | 5:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
198 198 | |
199 199 | o 4:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
200 200 |/
201 201 | o 3:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> D
202 202 | |
203 203 | @ 2:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> C
204 204 | |
205 205 | o 1:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> B
206 206 |/
207 207 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
208 208
209 209 $ rm -rf clone
210 210
211 211 Test a pull with a changegroup followed by a remote-changegroup
212 212
213 213 $ hg bundle -R repo --type v1 --base '0:4' -r '5:7' bundle3.hg
214 214 3 changesets found
215 215 $ cat > repo/.hg/bundle2maker << EOF
216 216 > changegroup 000000000000 :4
217 217 > remote-changegroup http://localhost:$HGPORT/bundle3.hg bundle3.hg
218 218 > EOF
219 219 $ hg clone orig clone -r 2
220 220 adding changesets
221 221 adding manifests
222 222 adding file changes
223 223 added 3 changesets with 3 changes to 3 files
224 224 updating to branch default
225 225 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
226 226 $ hg pull -R clone ssh://user@dummy/repo
227 227 pulling from ssh://user@dummy/repo
228 228 searching for changes
229 229 remote: changegroup
230 230 adding changesets
231 231 adding manifests
232 232 adding file changes
233 233 added 2 changesets with 2 changes to 2 files (+1 heads)
234 234 remote: remote-changegroup
235 235 adding changesets
236 236 adding manifests
237 237 adding file changes
238 238 added 3 changesets with 2 changes to 2 files (+1 heads)
239 239 (run 'hg heads' to see heads, 'hg merge' to merge)
240 240 $ hg -R clone log -G
241 241 o 7:02de42196ebe public Nicolas Dumazet <nicdumz.commits@gmail.com> H
242 242 |
243 243 | o 6:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> G
244 244 |/|
245 245 o | 5:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
246 246 | |
247 247 | o 4:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
248 248 |/
249 249 | o 3:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> D
250 250 | |
251 251 | @ 2:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> C
252 252 | |
253 253 | o 1:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> B
254 254 |/
255 255 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
256 256
257 257 $ rm -rf clone
258 258
259 259 Test a pull with two remote-changegroups and a changegroup
260 260
261 261 $ hg bundle -R repo --type v1 --base 2 -r '3:4' bundle4.hg
262 262 2 changesets found
263 263 $ hg bundle -R repo --type v1 --base '3:4' -r '5:6' bundle5.hg
264 264 2 changesets found
265 265 $ cat > repo/.hg/bundle2maker << EOF
266 266 > remote-changegroup http://localhost:$HGPORT/bundle4.hg bundle4.hg
267 267 > remote-changegroup http://localhost:$HGPORT/bundle5.hg bundle5.hg
268 268 > changegroup 0:6 7
269 269 > EOF
270 270 $ hg clone orig clone -r 2
271 271 adding changesets
272 272 adding manifests
273 273 adding file changes
274 274 added 3 changesets with 3 changes to 3 files
275 275 updating to branch default
276 276 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
277 277 $ hg pull -R clone ssh://user@dummy/repo
278 278 pulling from ssh://user@dummy/repo
279 279 searching for changes
280 280 remote: remote-changegroup
281 281 adding changesets
282 282 adding manifests
283 283 adding file changes
284 284 added 2 changesets with 2 changes to 2 files (+1 heads)
285 285 remote: remote-changegroup
286 286 adding changesets
287 287 adding manifests
288 288 adding file changes
289 289 added 2 changesets with 1 changes to 1 files
290 290 remote: changegroup
291 291 adding changesets
292 292 adding manifests
293 293 adding file changes
294 294 added 1 changesets with 1 changes to 1 files (+1 heads)
295 295 (run 'hg heads' to see heads, 'hg merge' to merge)
296 296 $ hg -R clone log -G
297 297 o 7:02de42196ebe public Nicolas Dumazet <nicdumz.commits@gmail.com> H
298 298 |
299 299 | o 6:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> G
300 300 |/|
301 301 o | 5:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
302 302 | |
303 303 | o 4:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
304 304 |/
305 305 | o 3:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> D
306 306 | |
307 307 | @ 2:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> C
308 308 | |
309 309 | o 1:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> B
310 310 |/
311 311 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
312 312
313 313 $ rm -rf clone
314 314
315 315 Hash digest tests
316 316
317 317 $ hg bundle -R repo --type v1 -a bundle6.hg
318 318 8 changesets found
319 319
320 320 $ cat > repo/.hg/bundle2maker << EOF
321 321 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle6.hg', 'size': 1663, 'digests': 'sha1', 'digest:sha1': '2c880cfec23cff7d8f80c2f12958d1563cbdaba6'}
322 322 > EOF
323 323 $ hg clone ssh://user@dummy/repo clone
324 324 requesting all changes
325 325 remote: remote-changegroup
326 326 adding changesets
327 327 adding manifests
328 328 adding file changes
329 329 added 8 changesets with 7 changes to 7 files (+2 heads)
330 330 updating to branch default
331 331 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
332 332 $ rm -rf clone
333 333
334 334 $ cat > repo/.hg/bundle2maker << EOF
335 335 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle6.hg', 'size': 1663, 'digests': 'md5', 'digest:md5': 'e22172c2907ef88794b7bea6642c2394'}
336 336 > EOF
337 337 $ hg clone ssh://user@dummy/repo clone
338 338 requesting all changes
339 339 remote: remote-changegroup
340 340 adding changesets
341 341 adding manifests
342 342 adding file changes
343 343 added 8 changesets with 7 changes to 7 files (+2 heads)
344 344 updating to branch default
345 345 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
346 346 $ rm -rf clone
347 347
348 348 Hash digest mismatch throws an error
349 349
350 350 $ cat > repo/.hg/bundle2maker << EOF
351 351 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle6.hg', 'size': 1663, 'digests': 'sha1', 'digest:sha1': '0' * 40}
352 352 > EOF
353 353 $ hg clone ssh://user@dummy/repo clone
354 354 requesting all changes
355 355 remote: remote-changegroup
356 356 adding changesets
357 357 adding manifests
358 358 adding file changes
359 359 added 8 changesets with 7 changes to 7 files (+2 heads)
360 360 transaction abort!
361 361 rollback completed
362 362 abort: bundle at http://localhost:$HGPORT/bundle6.hg is corrupted:
363 363 sha1 mismatch: expected 0000000000000000000000000000000000000000, got 2c880cfec23cff7d8f80c2f12958d1563cbdaba6
364 364 [255]
365 365
366 366 Multiple hash digests can be given
367 367
368 368 $ cat > repo/.hg/bundle2maker << EOF
369 369 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle6.hg', 'size': 1663, 'digests': 'md5 sha1', 'digest:md5': 'e22172c2907ef88794b7bea6642c2394', 'digest:sha1': '2c880cfec23cff7d8f80c2f12958d1563cbdaba6'}
370 370 > EOF
371 371 $ hg clone ssh://user@dummy/repo clone
372 372 requesting all changes
373 373 remote: remote-changegroup
374 374 adding changesets
375 375 adding manifests
376 376 adding file changes
377 377 added 8 changesets with 7 changes to 7 files (+2 heads)
378 378 updating to branch default
379 379 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
380 380 $ rm -rf clone
381 381
382 382 If either of the multiple hash digests mismatches, an error is thrown
383 383
384 384 $ cat > repo/.hg/bundle2maker << EOF
385 385 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle6.hg', 'size': 1663, 'digests': 'md5 sha1', 'digest:md5': '0' * 32, 'digest:sha1': '2c880cfec23cff7d8f80c2f12958d1563cbdaba6'}
386 386 > EOF
387 387 $ hg clone ssh://user@dummy/repo clone
388 388 requesting all changes
389 389 remote: remote-changegroup
390 390 adding changesets
391 391 adding manifests
392 392 adding file changes
393 393 added 8 changesets with 7 changes to 7 files (+2 heads)
394 394 transaction abort!
395 395 rollback completed
396 396 abort: bundle at http://localhost:$HGPORT/bundle6.hg is corrupted:
397 397 md5 mismatch: expected 00000000000000000000000000000000, got e22172c2907ef88794b7bea6642c2394
398 398 [255]
399 399
400 400 $ cat > repo/.hg/bundle2maker << EOF
401 401 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle6.hg', 'size': 1663, 'digests': 'md5 sha1', 'digest:md5': 'e22172c2907ef88794b7bea6642c2394', 'digest:sha1': '0' * 40}
402 402 > EOF
403 403 $ hg clone ssh://user@dummy/repo clone
404 404 requesting all changes
405 405 remote: remote-changegroup
406 406 adding changesets
407 407 adding manifests
408 408 adding file changes
409 409 added 8 changesets with 7 changes to 7 files (+2 heads)
410 410 transaction abort!
411 411 rollback completed
412 412 abort: bundle at http://localhost:$HGPORT/bundle6.hg is corrupted:
413 413 sha1 mismatch: expected 0000000000000000000000000000000000000000, got 2c880cfec23cff7d8f80c2f12958d1563cbdaba6
414 414 [255]
415 415
416 416 Corruption tests
417 417
418 418 $ hg clone orig clone -r 2
419 419 adding changesets
420 420 adding manifests
421 421 adding file changes
422 422 added 3 changesets with 3 changes to 3 files
423 423 updating to branch default
424 424 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
425 425
426 426 $ cat > repo/.hg/bundle2maker << EOF
427 427 > remote-changegroup http://localhost:$HGPORT/bundle4.hg bundle4.hg
428 428 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle5.hg', 'size': 578, 'digests': 'sha1', 'digest:sha1': '0' * 40}
429 429 > changegroup 0:6 7
430 430 > EOF
431 431 $ hg pull -R clone ssh://user@dummy/repo
432 432 pulling from ssh://user@dummy/repo
433 433 searching for changes
434 434 remote: remote-changegroup
435 435 adding changesets
436 436 adding manifests
437 437 adding file changes
438 438 added 2 changesets with 2 changes to 2 files (+1 heads)
439 439 remote: remote-changegroup
440 440 adding changesets
441 441 adding manifests
442 442 adding file changes
443 443 added 2 changesets with 1 changes to 1 files
444 444 transaction abort!
445 445 rollback completed
446 446 abort: bundle at http://localhost:$HGPORT/bundle5.hg is corrupted:
447 447 sha1 mismatch: expected 0000000000000000000000000000000000000000, got f29485d6bfd37db99983cfc95ecb52f8ca396106
448 448 [255]
449 449
450 450 The entire transaction has been rolled back in the pull above
451 451
452 452 $ hg -R clone log -G
453 453 @ 2:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> C
454 454 |
455 455 o 1:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> B
456 456 |
457 457 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
458 458
459 459
460 460 No params
461 461
462 462 $ cat > repo/.hg/bundle2maker << EOF
463 463 > raw-remote-changegroup {}
464 464 > EOF
465 465 $ hg pull -R clone ssh://user@dummy/repo
466 466 pulling from ssh://user@dummy/repo
467 467 searching for changes
468 468 remote: remote-changegroup
469 469 abort: remote-changegroup: missing "url" param
470 470 [255]
471 471
472 472 Missing size
473 473
474 474 $ cat > repo/.hg/bundle2maker << EOF
475 475 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle4.hg'}
476 476 > EOF
477 477 $ hg pull -R clone ssh://user@dummy/repo
478 478 pulling from ssh://user@dummy/repo
479 479 searching for changes
480 480 remote: remote-changegroup
481 481 abort: remote-changegroup: missing "size" param
482 482 [255]
483 483
484 484 Invalid size
485 485
486 486 $ cat > repo/.hg/bundle2maker << EOF
487 487 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle4.hg', 'size': 'foo'}
488 488 > EOF
489 489 $ hg pull -R clone ssh://user@dummy/repo
490 490 pulling from ssh://user@dummy/repo
491 491 searching for changes
492 492 remote: remote-changegroup
493 493 abort: remote-changegroup: invalid value for param "size"
494 494 [255]
495 495
496 496 Size mismatch
497 497
498 498 $ cat > repo/.hg/bundle2maker << EOF
499 499 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle4.hg', 'size': 42}
500 500 > EOF
501 501 $ hg pull -R clone ssh://user@dummy/repo
502 502 pulling from ssh://user@dummy/repo
503 503 searching for changes
504 504 remote: remote-changegroup
505 505 adding changesets
506 506 adding manifests
507 507 adding file changes
508 508 added 2 changesets with 2 changes to 2 files (+1 heads)
509 509 transaction abort!
510 510 rollback completed
511 511 abort: bundle at http://localhost:$HGPORT/bundle4.hg is corrupted:
512 512 size mismatch: expected 42, got 581
513 513 [255]
514 514
515 515 Unknown digest
516 516
517 517 $ cat > repo/.hg/bundle2maker << EOF
518 518 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle4.hg', 'size': 581, 'digests': 'foo', 'digest:foo': 'bar'}
519 519 > EOF
520 520 $ hg pull -R clone ssh://user@dummy/repo
521 521 pulling from ssh://user@dummy/repo
522 522 searching for changes
523 523 remote: remote-changegroup
524 524 abort: missing support for remote-changegroup - digest:foo
525 525 [255]
526 526
527 527 Missing digest
528 528
529 529 $ cat > repo/.hg/bundle2maker << EOF
530 530 > raw-remote-changegroup {'url': 'http://localhost:$HGPORT/bundle4.hg', 'size': 581, 'digests': 'sha1'}
531 531 > EOF
532 532 $ hg pull -R clone ssh://user@dummy/repo
533 533 pulling from ssh://user@dummy/repo
534 534 searching for changes
535 535 remote: remote-changegroup
536 536 abort: remote-changegroup: missing "digest:sha1" param
537 537 [255]
538 538
539 539 Not an HTTP url
540 540
541 541 $ cat > repo/.hg/bundle2maker << EOF
542 542 > raw-remote-changegroup {'url': 'ssh://localhost:$HGPORT/bundle4.hg', 'size': 581}
543 543 > EOF
544 544 $ hg pull -R clone ssh://user@dummy/repo
545 545 pulling from ssh://user@dummy/repo
546 546 searching for changes
547 547 remote: remote-changegroup
548 548 abort: remote-changegroup does not support ssh urls
549 549 [255]
550 550
551 551 Not a bundle
552 552
553 553 $ cat > notbundle.hg << EOF
554 554 > foo
555 555 > EOF
556 556 $ cat > repo/.hg/bundle2maker << EOF
557 557 > remote-changegroup http://localhost:$HGPORT/notbundle.hg notbundle.hg
558 558 > EOF
559 559 $ hg pull -R clone ssh://user@dummy/repo
560 560 pulling from ssh://user@dummy/repo
561 561 searching for changes
562 562 remote: remote-changegroup
563 563 abort: http://localhost:$HGPORT/notbundle.hg: not a Mercurial bundle
564 564 [255]
565 565
566 566 Not a bundle 1.0
567 567
568 568 $ cat > notbundle10.hg << EOF
569 569 > HG20
570 570 > EOF
571 571 $ cat > repo/.hg/bundle2maker << EOF
572 572 > remote-changegroup http://localhost:$HGPORT/notbundle10.hg notbundle10.hg
573 573 > EOF
574 574 $ hg pull -R clone ssh://user@dummy/repo
575 575 pulling from ssh://user@dummy/repo
576 576 searching for changes
577 577 remote: remote-changegroup
578 578 abort: http://localhost:$HGPORT/notbundle10.hg: not a bundle version 1.0
579 579 [255]
580 580
581 581 $ hg -R clone log -G
582 582 @ 2:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> C
583 583 |
584 584 o 1:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> B
585 585 |
586 586 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
587 587
588 588 $ rm -rf clone
589 589
590 590 $ killdaemons.py
General Comments 0
You need to be logged in to leave comments. Login now