##// END OF EJS Templates
localrepo: allow narrowmatch() to accept matcher to intersect with...
Martin von Zweigbergk -
r40437:4fd0fac4 default
parent child Browse files
Show More
@@ -1,1362 +1,1361
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import weakref
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullid,
18 18 nullrev,
19 19 short,
20 20 )
21 21
22 22 from . import (
23 23 error,
24 24 match as matchmod,
25 25 mdiff,
26 26 phases,
27 27 pycompat,
28 28 repository,
29 29 util,
30 30 )
31 31
32 32 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
33 33 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
34 34 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
35 35
36 36 LFS_REQUIREMENT = 'lfs'
37 37
38 38 readexactly = util.readexactly
39 39
def getchunk(stream):
    """Read and return the next length-prefixed chunk from stream.

    A chunk is framed by a 4-byte big-endian length that includes the
    length field itself. Returns the payload bytes, or "" on the
    zero-length terminator chunk. Raises Abort on a malformed length.
    """
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length <= 4:
        # A length of exactly 0 marks the end of a chunk sequence;
        # 1-4 (or negative) can never frame a real payload.
        if length:
            raise error.Abort(_("invalid chunk length %d") % length)
        return ""
    return readexactly(stream, length - 4)
49 49
def chunkheader(length):
    """Encode the framing header for a chunk carrying ``length`` payload bytes.

    The on-wire length field counts itself, hence the +4.
    """
    framedlen = length + 4
    return struct.pack(">l", framedlen)
53 53
def closechunk():
    """Return the marker for a zero-length chunk.

    A zero in the length field terminates a chunk sequence in the
    changegroup wire format.
    """
    emptylen = 0
    return struct.pack(">l", emptylen)
57 57
def _fileheader(path):
    """Build the chunk announcing a named path: framing header + path bytes."""
    header = chunkheader(len(path))
    return header + path
61 61
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.

    ``vfs``, when given, is used to open (and on error, unlink) the named
    file; otherwise the plain filesystem is used.
    """
    fh = None
    # 'cleanup' holds the path to delete if we bail out part-way; it is
    # reset to None once all chunks were written successfully.
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        # Everything written: disarm the cleanup so the file survives.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            # Partial write (or open failure after mkstemp): remove the
            # incomplete file through the same layer that created it.
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
95 95
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    # Per-version knobs; subclasses (cg2/cg3) override these.
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        """Wrap stream ``fh`` compressed with bundle compression ``alg``.

        ``alg`` may be None (treated as uncompressed). Raises Abort for an
        unknown compression type.
        """
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Optional no-arg callable invoked once per parsed chunk (used by
        # apply() to drive progress meters).
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        # Read a 4-byte big-endian length; the field counts itself, so the
        # payload size is l - 4. Returns 0 on the terminating empty chunk.
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 has no explicit delta base on the wire: deltas chain against
        # the previous revision in the group, or p1 for the first one.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta entry; returns {} at the end of the group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                # Re-emit the payload in <=1MB slices to bound memory use of
                # downstream consumers.
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        # Link each incoming changeset to the revnum it will occupy
        # (appended at the current end of the changelog).
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            # Collect the set of files touched by the incoming changesets;
            # used below as the expected total for the file progress meter.
            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changelog from changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    # New heads that close a branch don't count towards the
                    # head-count delta reported to the user.
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
438 438
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 changegroup streams.

    The cg2 format adds generaldelta support: each delta header names its
    delta base explicitly instead of implicitly chaining on the previous
    entry. Everything else matches cg1.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # The wire header already carries the delta base; cg2 has no
        # per-revision flags, so they are always zero.
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
454 454
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 changegroup streams.

    The cg3 format extends cg2 with revlog flags in the delta header and
    with tree manifest exchange: an extra group list (terminated by an
    empty chunk) carries directory manifest revlogs between the root
    manifests and the filelogs.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # The cg3 header carries every field we need, including flags,
        # in exactly the order callers expect.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        # Root manifests first, exactly as in cg2...
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        # ...then any number of directory manifest groups, each announced
        # by a filelog-style header naming the directory. An empty header
        # terminates the list.
        while True:
            chunkdata = self.filelogheader()
            if not chunkdata:
                break
            dirname = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % dirname)
            deltas = self.deltaiter()
            storage = repo.manifestlog.getstorage(dirname)
            if not storage.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
480 480
class headerlessfixup(object):
    """File-like adapter that replays an already-consumed header.

    Wraps stream ``fh`` and serves the bytes of ``h`` first, as if they
    had never been read from the stream; once ``h`` is exhausted, reads
    fall through to the underlying stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        data = buffered[:n]
        self._h = buffered[n:]
        if len(data) < n:
            # Header exhausted mid-read: top up from the real stream.
            data += readexactly(self._fh, n - len(data))
        return data
492 492
def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.
    if delta.delta is not None:
        payload = delta.delta
        prefix = b''
    elif delta.basenode == nullid:
        # Full text of a root revision: a trivial diff header turns it
        # into a "delta" that inserts the whole revision.
        payload = delta.revision
        prefix = mdiff.trivialdiffheader(len(payload))
    else:
        # Full text against a real base: the header instructs a complete
        # replacement of the base's content.
        payload = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize,
                                         len(payload))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(payload))
    yield meta
    if prefix:
        yield prefix
    yield payload
519 519
520 520 def _sortnodesellipsis(store, nodes, cl, lookup):
521 521 """Sort nodes for changegroup generation."""
522 522 # Ellipses serving mode.
523 523 #
524 524 # In a perfect world, we'd generate better ellipsis-ified graphs
525 525 # for non-changelog revlogs. In practice, we haven't started doing
526 526 # that yet, so the resulting DAGs for the manifestlog and filelogs
527 527 # are actually full of bogus parentage on all the ellipsis
528 528 # nodes. This has the side effect that, while the contents are
529 529 # correct, the individual DAGs might be completely out of whack in
530 530 # a case like 882681bc3166 and its ancestors (back about 10
531 531 # revisions or so) in the main hg repo.
532 532 #
533 533 # The one invariant we *know* holds is that the new (potentially
534 534 # bogus) DAG shape will be valid if we order the nodes in the
535 535 # order that they're introduced in dramatis personae by the
536 536 # changelog, so what we do is we sort the non-changelog histories
537 537 # by the order in which they are used by the changelog.
538 538 key = lambda n: cl.rev(lookup(n))
539 539 return sorted(nodes, key=key)
540 540
def _resolvenarrowrevisioninfo(cl, store, ischangelog, rev, linkrev,
                               linknode, clrevtolocalrev, fullclnodes,
                               precomputedellipsis):
    """Compute adjusted parents for an ellipsis revision in ``store``.

    ``linkrev``/``linknode`` identify the changelog revision that introduced
    local revision ``rev``. The ellipsis parents of that changelog revision
    (from ``precomputedellipsis``) are mapped back into revisions of this
    store. Returns a ``(p1node, p2node, linknode)`` tuple.
    """
    linkparents = precomputedellipsis[linkrev]
    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p)
                             if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend([pp for pp in precomputedellipsis[p]
                             if pp != nullrev])
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    'unable to resolve parent while packing %r %r'
                    ' for changeset %r' % (store.indexfile, rev, clrev))

        return nullrev

    # No ellipsis parents, or this store revision is itself a root:
    # emit it parentless.
    if not linkparents or (
        store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        p1, = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode
619 619
def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
               topic=None,
               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
               precomputedellipsis=None):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = 'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = 'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl, store, ischangelog, rev, linkrev, linknode,
                    clrevtolocalrev, fullclnodes, precomputedellipsis)

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(topic, unit=_('chunks'),
                                        total=len(nodes))

    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltaprevious=forcedeltaparentprev)

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
                # Rewrite the emitted revision's parents to the adjusted
                # (ellipsis) parents computed in the first pass.
                p1node, p2node = adjustedparents[revision.node]
                revision.p1node = p1node
                revision.p2node = p2node
                revision.flags |= repository.REVISION_FLAG_ELLIPSIS

        else:
            linknode = lookup(revision.node)

        revision.linknode = linknode
        yield revision

    if progress:
        progress.complete()
728 728
729 729 class cgpacker(object):
    def __init__(self, repo, filematcher, version,
                 builddeltaheader, manifestsend,
                 forcedeltaparentprev=False,
                 bundlecaps=None, ellipses=False,
                 shallow=False, ellipsisroots=None, fullnodes=None):
        """Given a source repo, construct a bundler.

        filematcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        forcedeltaparentprev indicates whether delta parents must be against
        the previous revision in a delta group. This should only be used for
        compatibility with changegroup version 1.

        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        ellipses indicates whether ellipsis serving mode is enabled.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.

        shallow indicates whether shallow data might be sent. The packer may
        need to pack file contents not introduced by the changes being packed.

        fullnodes is the set of changelog nodes which should not be ellipsis
        nodes. We store this rather than the set of nodes that should be
        ellipsis because for very large histories we expect this to be
        significantly smaller.
        """
        # A matcher is mandatory; sparse/narrow callers pass a restricted one.
        assert filematcher
        self._filematcher = filematcher

        self.version = version
        self._forcedeltaparentprev = forcedeltaparentprev
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._ellipses = ellipses

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._isshallow = shallow
        self._fullclnodes = fullnodes

        # Maps ellipsis revs to their roots at the changelog level.
        self._precomputedellipsis = ellipsisroots

        self._repo = repo

        # In verbose (but not debug) mode, emit per-section size notes.
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None
789 789
790 790 def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
791 791 changelog=True):
792 792 """Yield a sequence of changegroup byte chunks.
793 793 If changelog is False, changelog data won't be added to changegroup
794 794 """
795 795
796 796 repo = self._repo
797 797 cl = repo.changelog
798 798
799 799 self._verbosenote(_('uncompressed size of bundle content:\n'))
800 800 size = 0
801 801
802 802 clstate, deltas = self._generatechangelog(cl, clnodes)
803 803 for delta in deltas:
804 804 if changelog:
805 805 for chunk in _revisiondeltatochunks(delta,
806 806 self._builddeltaheader):
807 807 size += len(chunk)
808 808 yield chunk
809 809
810 810 close = closechunk()
811 811 size += len(close)
812 812 yield closechunk()
813 813
814 814 self._verbosenote(_('%8.i (changelog)\n') % size)
815 815
816 816 clrevorder = clstate['clrevorder']
817 817 manifests = clstate['manifests']
818 818 changedfiles = clstate['changedfiles']
819 819
820 820 # We need to make sure that the linkrev in the changegroup refers to
821 821 # the first changeset that introduced the manifest or file revision.
822 822 # The fastpath is usually safer than the slowpath, because the filelogs
823 823 # are walked in revlog order.
824 824 #
825 825 # When taking the slowpath when the manifest revlog uses generaldelta,
826 826 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
827 827 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
828 828 #
829 829 # When taking the fastpath, we are only vulnerable to reordering
830 830 # of the changelog itself. The changelog never uses generaldelta and is
831 831 # never reordered. To handle this case, we simply take the slowpath,
832 832 # which already has the 'clrevorder' logic. This was also fixed in
833 833 # cc0ff93d0c0c.
834 834
835 835 # Treemanifests don't work correctly with fastpathlinkrev
836 836 # either, because we don't discover which directory nodes to
837 837 # send along with files. This could probably be fixed.
838 838 fastpathlinkrev = fastpathlinkrev and (
839 839 'treemanifest' not in repo.requirements)
840 840
841 841 fnodes = {} # needed file nodes
842 842
843 843 size = 0
844 844 it = self.generatemanifests(
845 845 commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
846 846 clstate['clrevtomanifestrev'])
847 847
848 848 for tree, deltas in it:
849 849 if tree:
850 850 assert self.version == b'03'
851 851 chunk = _fileheader(tree)
852 852 size += len(chunk)
853 853 yield chunk
854 854
855 855 for delta in deltas:
856 856 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
857 857 for chunk in chunks:
858 858 size += len(chunk)
859 859 yield chunk
860 860
861 861 close = closechunk()
862 862 size += len(close)
863 863 yield close
864 864
865 865 self._verbosenote(_('%8.i (manifests)\n') % size)
866 866 yield self._manifestsend
867 867
868 868 mfdicts = None
869 869 if self._ellipses and self._isshallow:
870 870 mfdicts = [(self._repo.manifestlog[n].read(), lr)
871 871 for (n, lr) in manifests.iteritems()]
872 872
873 873 manifests.clear()
874 874 clrevs = set(cl.rev(x) for x in clnodes)
875 875
876 876 it = self.generatefiles(changedfiles, commonrevs,
877 877 source, mfdicts, fastpathlinkrev,
878 878 fnodes, clrevs)
879 879
880 880 for path, deltas in it:
881 881 h = _fileheader(path)
882 882 size = len(h)
883 883 yield h
884 884
885 885 for delta in deltas:
886 886 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
887 887 for chunk in chunks:
888 888 size += len(chunk)
889 889 yield chunk
890 890
891 891 close = closechunk()
892 892 size += len(close)
893 893 yield close
894 894
895 895 self._verbosenote(_('%8.i %s\n') % (size, path))
896 896
897 897 yield closechunk()
898 898
899 899 if clnodes:
900 900 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
901 901
    def _generatechangelog(self, cl, nodes):
        """Generate data for changelog chunks.

        Returns a 2-tuple of a dict containing state and an iterable of
        byte chunks. The state will not be fully populated until the
        chunk stream has been fully consumed.

        ``cl`` is the changelog; ``nodes`` are the changelog nodes to emit.
        The state dict keys are 'clrevorder', 'manifests', 'changedfiles'
        and 'clrevtomanifestrev'; they are mutated as a side effect of the
        per-revision callback below, so callers must drain the generator
        before trusting them.
        """
        # Emission-order index of each changelog node (node -> position).
        clrevorder = {}
        # First changeset node introducing each manifest node.
        manifests = {}
        mfl = self._repo.manifestlog
        # Union of files touched by the emitted changesets.
        changedfiles = set()
        # cl rev -> lowest manifest revnum to consult (ellipsis mapping).
        clrevtomanifestrev = {}

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.changelogrevision(x)
            clrevorder[x] = len(clrevorder)

            if self._ellipses:
                # Only update manifests if x is going to be sent. Otherwise we
                # end up with bogus linkrevs specified for manifests and
                # we skip some manifest nodes that we should otherwise
                # have sent.
                if (x in self._fullclnodes
                    or cl.rev(x) in self._precomputedellipsis):

                    manifestnode = c.manifest
                    # Record the first changeset introducing this manifest
                    # version.
                    manifests.setdefault(manifestnode, x)
                    # Set this narrow-specific dict so we have the lowest
                    # manifest revnum to look up for this cl revnum. (Part of
                    # mapping changelog ellipsis parents to manifest ellipsis
                    # parents)
                    clrevtomanifestrev.setdefault(
                        cl.rev(x), mfl.rev(manifestnode))
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self._isshallow:
                    changedfiles.update(mfl[c.manifest].read().keys())
                else:
                    changedfiles.update(c.files)
            else:
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, x)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return x

        state = {
            'clrevorder': clrevorder,
            'manifests': manifests,
            'changedfiles': changedfiles,
            'clrevtomanifestrev': clrevtomanifestrev,
        }

        # Changelog deltas always use the previous revision as delta base
        # when forced; lookupcl fills in the state dicts lazily.
        gen = deltagroup(
            self._repo, cl, nodes, True, lookupcl,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            topic=_('changesets'),
            clrevtolocalrev={},
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis)

        return state, gen
972 972
    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
                          manifests, fnodes, source, clrevtolocalrev):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based in pulls vs pushes, etc.

        Yields ``(tree, deltas)`` pairs, one per (sub)directory manifest.
        SIDE EFFECT: ``fnodes`` is populated (file path -> {filenode:
        linkrev node}) by the lookup callback while the deltas are consumed.
        """
        repo = self._repo
        mfl = repo.manifestlog
        # Worklist of directory -> {manifest node: linkrev node}; subtrees
        # discovered while reading a manifest are appended here and drained
        # by the while loop below. '' is the root manifest.
        tmfnodes = {'': manifests}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(tree, nodes):
            if fastpathlinkrev:
                # Fast path only applies to the root manifest.
                assert not tree
                return manifests.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(tree, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subtree = tree + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subtree, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        # Keep the earliest (in emission order) linkrev node.
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = tree + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        while tmfnodes:
            tree, nodes = tmfnodes.popitem()
            store = mfl.getstorage(tree)

            if not self._filematcher.visitdir(store.tree[:-1] or '.'):
                # No nodes to send because this directory is out of
                # the client's view of the repository (probably
                # because of narrow clones).
                prunednodes = []
            else:
                # Avoid sending any manifest nodes we can prove the
                # client already has by checking linkrevs. See the
                # related comment in generatefiles().
                prunednodes = self._prunemanifests(store, nodes, commonrevs)
            if tree and not prunednodes:
                continue

            lookupfn = makelookupmflinknode(tree, nodes)

            deltas = deltagroup(
                self._repo, store, prunednodes, False, lookupfn,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                topic=_('manifests'),
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis)

            yield tree, deltas
1055 1055
1056 1056 def _prunemanifests(self, store, nodes, commonrevs):
1057 1057 # This is split out as a separate method to allow filtering
1058 1058 # commonrevs in extension code.
1059 1059 #
1060 1060 # TODO(augie): this shouldn't be required, instead we should
1061 1061 # make filtering of revisions to send delegated to the store
1062 1062 # layer.
1063 1063 frev, flr = store.rev, store.linkrev
1064 1064 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1065 1065
    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, commonrevs, source,
                      mfdicts, fastpathlinkrev, fnodes, clrevs):
        """Yield ``(path, deltas)`` pairs for the filelog revisions to send.

        ``changedfiles`` is filtered through the packer's file matcher;
        ``fnodes`` must already be populated (by generatemanifests) in the
        non-fastpath case. ``mfdicts`` is only used for shallow clones.
        """
        changedfiles = list(filter(self._filematcher, changedfiles))

        if not fastpathlinkrev:
            def normallinknodes(unused, fname):
                # Linkrev nodes were collected while sending manifests.
                return fnodes.get(fname, {})
        else:
            cln = self._repo.changelog.node

            def normallinknodes(store, fname):
                # Fast path: derive linkrev nodes straight from the filelog.
                flinkrev = store.linkrev
                fnode = store.node
                revs = ((r, flinkrev(r)) for r in store)
                return dict((fnode(r), cln(lr))
                            for r, lr in revs if lr in clrevs)

        clrevtolocalrev = {}

        if self._isshallow:
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            clrev = self._repo.changelog.rev

            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        clrevtolocalrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        # File not present in this common context; skip.
                        pass
                links = normallinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            # Prefer the earliest linkrev (by changelog rev).
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links
        else:
            linknodes = normallinknodes

        repo = self._repo
        progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            # clrevtolocalrev is rebuilt per file by linknodes() above.
            clrevtolocalrev.clear()

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            frev, flr = filerevlog.rev, filerevlog.linkrev
            # Skip sending any filenode we know the client already
            # has. This avoids over-sending files relatively
            # inexpensively, so it's not a problem if we under-filter
            # here.
            filenodes = [n for n in linkrevnodes
                         if flr(frev(n)) not in commonrevs]

            if not filenodes:
                continue

            progress.update(i + 1, item=fname)

            deltas = deltagroup(
                self._repo, filerevlog, filenodes, False, lookupfilelog,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis)

            yield fname, deltas

        progress.complete()
1153 1153
def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Build a cgpacker producing version '01' changegroups."""
    def builddeltaheader(d):
        return _CHANGEGROUPV1_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.linknode)

    # cg1 has no delta-base field, so deltas are always against the
    # previous revision (forcedeltaparentprev), and no manifest trailer.
    return cgpacker(repo, filematcher, b'01',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    forcedeltaparentprev=True,
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1168 1168
def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Build a cgpacker producing version '02' changegroups."""
    def builddeltaheader(d):
        # cg2 adds an explicit delta base node to the header.
        return _CHANGEGROUPV2_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    return cgpacker(repo, filematcher, b'02',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1182 1182
def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Build a cgpacker producing version '03' changegroups."""
    def builddeltaheader(d):
        # cg3 extends cg2's header with a 16-bit revlog flags field.
        return _CHANGEGROUPV3_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    # cg3 terminates the manifest section with an empty chunk so that
    # per-directory (tree) manifest groups can follow.
    return cgpacker(repo, filematcher, b'03',
                    builddeltaheader=builddeltaheader,
                    manifestsend=closechunk(),
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1196 1196
# Maps changegroup version identifier -> (packer factory, unpacker class).
_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
             }
1203 1203
def allsupportedversions(repo):
    """Return all changegroup versions this code knows about.

    Version '03' is withheld unless the repo opts into it via config or
    the treemanifest requirement.
    """
    versions = set(_packermap)
    wantscg3 = (repo.ui.configbool('experimental', 'changegroup3')
                or repo.ui.configbool('experimental', 'treemanifest')
                or 'treemanifest' in repo.requirements)
    if not wantscg3:
        versions.discard('03')
    return versions
1211 1211
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return the changegroup versions this repo can apply."""
    return allsupportedversions(repo)
1215 1215
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the changegroup versions this repo can produce."""
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.difference_update(('01', '02'))
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.difference_update(('01', '02'))
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.difference_update(('01', '02'))

    return versions
1239 1239
def localversion(repo):
    """Return the best version for bundles meant to be used locally,
    such as those from strip and shelve, and temporary bundles."""
    versions = supportedoutgoingversions(repo)
    return max(versions)
1244 1244
def safeversion(repo):
    """Return the smallest version it's safe to assume clients of the
    repo will support.

    For example, all hg versions that support generaldelta also support
    changegroup 02.
    """
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
1254 1254
def getbundler(version, repo, bundlecaps=None, filematcher=None,
               ellipses=False, shallow=False, ellipsisroots=None,
               fullnodes=None):
    """Return a changegroup packer for ``version``.

    ``filematcher`` restricts which files are included; it defaults to an
    always-matcher and is intersected with the repo's narrow matcher so
    that files absent from the local store are never requested.

    Raises ProgrammingError if a sparse matcher is used with version '01'
    and Abort if ellipsis nodes are requested for versions below cg3.
    """
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _('ellipsis nodes require at least cg3 on client and server, '
              'but negotiated version %s') % version)

    # Requested files could include files not in the local store. So
    # filter those out. (narrowmatch() accepts a matcher to intersect
    # with, replacing the explicit matchmod.intersectmatchers() call.)
    filematcher = repo.narrowmatch(filematcher)

    fn = _packermap[version][0]
    return fn(repo, filematcher, bundlecaps, ellipses=ellipses,
              shallow=shallow, ellipsisroots=ellipsisroots,
              fullnodes=fullnodes)
1281 1280
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the unpacker class registered for ``version``."""
    unpackerclass = _packermap[version][1]
    return unpackerclass(fh, alg, extras=extras)
1284 1283
def _changegroupinfo(repo, nodes, source):
    """Report the changeset count (and, when debugging, each node)."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if not ui.debugflag:
        return
    ui.debug("list of changesets:\n")
    for node in nodes:
        ui.debug("%s\n" % hex(node))
1292 1291
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Create a changegroup stream and wrap it in an unbundler."""
    stream = makestream(repo, outgoing, version, source,
                        fastpath=fastpath, bundlecaps=bundlecaps)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(version, util.chunkbuffer(stream), None, extras)
1299 1298
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    """Generate the changegroup byte stream for ``outgoing`` changesets."""
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client). Note: sorts outgoing.missingheads in place.
    heads.sort()
    if not fastpath:
        fastpath = (repo.filtername is None
                    and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpath, source)
1319 1318
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the file portion of an incoming changegroup.

    ``source`` is the changegroup stream being consumed; ``revmap`` maps
    linkrev nodes to local revisions; ``trp`` is the transaction proxy.
    ``needfiles`` maps file path -> set of nodes that must arrive; it is
    mutated (entries removed as satisfied) and any leftover missing node
    triggers an Abort at the end.

    Returns a ``(revisions, files)`` tuple of counts of what was added.
    """
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    # filelogheader() returns {} at the end of the file section.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            # Check off every newly-added node against what we expected.
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    # Anything left in needfiles must already exist locally, or the
    # incoming changegroup was incomplete.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
@@ -1,5993 +1,5992
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import difflib
11 11 import errno
12 12 import os
13 13 import re
14 14 import sys
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 wdirhex,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 archival,
27 27 bookmarks,
28 28 bundle2,
29 29 changegroup,
30 30 cmdutil,
31 31 copies,
32 32 debugcommands as debugcommandsmod,
33 33 destutil,
34 34 dirstateguard,
35 35 discovery,
36 36 encoding,
37 37 error,
38 38 exchange,
39 39 extensions,
40 40 filemerge,
41 41 formatter,
42 42 graphmod,
43 43 hbisect,
44 44 help,
45 45 hg,
46 46 logcmdutil,
47 match as matchmod,
48 47 merge as mergemod,
49 48 narrowspec,
50 49 obsolete,
51 50 obsutil,
52 51 patch,
53 52 phases,
54 53 pycompat,
55 54 rcutil,
56 55 registrar,
57 56 repair,
58 57 revsetlang,
59 58 rewriteutil,
60 59 scmutil,
61 60 server,
62 61 state as statemod,
63 62 streamclone,
64 63 tags as tagsmod,
65 64 templatekw,
66 65 ui as uimod,
67 66 util,
68 67 wireprotoserver,
69 68 )
70 69 from .utils import (
71 70 dateutil,
72 71 stringutil,
73 72 )
74 73
# Command table; seeded with the debug commands registered elsewhere.
table = {}
table.update(debugcommandsmod.command._table)

command = registrar.command(table)
INTENT_READONLY = registrar.INTENT_READONLY

# common command options

# Options accepted by every command (flag, name, default, help[, value name]).
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or name of overlay bundle file'),
     _('REPO')),
    ('', 'cwd', '',
     _('change working directory'), _('DIR')),
    ('y', 'noninteractive', None,
     _('do not prompt, automatically pick the first choice for all prompts')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'color', '',
     # i18n: 'always', 'auto', 'never', and 'debug' are keywords
     # and should not be translated
     _("when to colorize (boolean, always, auto, never, or debug)"),
     _('TYPE')),
    ('', 'config', [],
     _('set/override config option (use \'section.name=value\')'),
     _('CONFIG')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', encoding.encoding, _('set the charset encoding'),
     _('ENCODE')),
    ('', 'encodingmode', encoding.encodingmode,
     _('set the charset encoding mode'), _('MODE')),
    ('', 'traceback', None, _('always print a traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
    ('', 'hidden', False, _('consider hidden changesets')),
    ('', 'pager', 'auto',
     _("when to paginate (boolean, always, auto, or never)"), _('TYPE')),
]

# Re-export the shared option groups from cmdutil for local use below.
dryrunopts = cmdutil.dryrunopts
remoteopts = cmdutil.remoteopts
walkopts = cmdutil.walkopts
commitopts = cmdutil.commitopts
commitopts2 = cmdutil.commitopts2
formatteropts = cmdutil.formatteropts
templateopts = cmdutil.templateopts
logopts = cmdutil.logopts
diffopts = cmdutil.diffopts
diffwsopts = cmdutil.diffwsopts
diffopts2 = cmdutil.diffopts2
mergetoolopts = cmdutil.mergetoolopts
similarityopts = cmdutil.similarityopts
subrepoopts = cmdutil.subrepoopts
debugrevlogopts = cmdutil.debugrevlogopts

# Commands start here, listed alphabetically
134 133
@command('^add',
    walkopts + subrepoopts + dryrunopts,
    _('[OPTION]... [FILE]...'),
    inferrepo=True)
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the
    repository.

    The files will be added to the repository at the next commit. To
    undo an add before that, see :hg:`forget`.

    If no names are given, add all files to the repository (except
    files matching ``.hgignore``).

    .. container:: verbose

       Examples:

         - New (unknown) files are added
           automatically by :hg:`add`::

             $ ls
             foo.c
             $ hg status
             ? foo.c
             $ hg add
             adding foo.c
             $ hg status
             A foo.c

         - Specific files to be added can be specified::

             $ ls
             bar.c foo.c
             $ hg status
             ? bar.c
             ? foo.c
             $ hg add bar.c
             $ hg status
             A bar.c
             ? foo.c

    Returns 0 if all files are successfully added.
    """

    matcher = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
    rejected = cmdutil.add(ui, repo, matcher, "", False, **opts)
    return 1 if rejected else 0
185 184
@command('addremove',
    similarityopts + subrepoopts + walkopts + dryrunopts,
    _('[OPTION]... [FILE]...'),
    inferrepo=True)
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the
    repository.

    Unless names are given, new files are ignored if they match any of
    the patterns in ``.hgignore``. As with add, these changes take
    effect at the next commit.

    Use the -s/--similarity option to detect renamed files. This
    option takes a percentage between 0 (disabled) and 100 (files must
    be identical) as its parameter. With a parameter greater than 0,
    this compares every removed file with every added file and records
    those similar enough as renames. Detecting renamed files this way
    can be expensive. After using this option, :hg:`status -C` can be
    used to check which files were identified as moved or renamed. If
    not specified, -s/--similarity defaults to 100 and only renames of
    identical files are detected.

    .. container:: verbose

       Examples:

         - A number of files (bar.c and foo.c) are new,
           while foobar.c has been removed (without using :hg:`remove`)
           from the repository::

             $ ls
             bar.c foo.c
             $ hg status
             ! foobar.c
             ? bar.c
             ? foo.c
             $ hg addremove
             adding bar.c
             adding foo.c
             removing foobar.c
             $ hg status
             A bar.c
             A foo.c
             R foobar.c

         - A file foobar.c was moved to foo.c without using :hg:`rename`.
           Afterwards, it was edited slightly::

             $ ls
             foo.c
             $ hg status
             ! foobar.c
             ? foo.c
             $ hg addremove --similarity 90
             removing foobar.c
             adding foo.c
             recording removal of foobar.c as rename to foo.c (94% similar)
             $ hg status -C
             A foo.c
               foobar.c
             R foobar.c

    Returns 0 if all files are successfully added.
    """
    opts = pycompat.byteskwargs(opts)
    # Default to 100% similarity so only identical-file renames are found.
    if not opts.get('similarity'):
        opts['similarity'] = '100'
    matcher = scmutil.match(repo[None], pats, opts)
    return scmutil.addremove(repo, matcher, "", opts)
257 256
258 257 @command('^annotate|blame',
259 258 [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
260 259 ('', 'follow', None,
261 260 _('follow copies/renames and list the filename (DEPRECATED)')),
262 261 ('', 'no-follow', None, _("don't follow copies and renames")),
263 262 ('a', 'text', None, _('treat all files as text')),
264 263 ('u', 'user', None, _('list the author (long with -v)')),
265 264 ('f', 'file', None, _('list the filename')),
266 265 ('d', 'date', None, _('list the date (short with -q)')),
267 266 ('n', 'number', None, _('list the revision number (default)')),
268 267 ('c', 'changeset', None, _('list the changeset')),
269 268 ('l', 'line-number', None, _('show line number at the first appearance')),
270 269 ('', 'skip', [], _('revision to not display (EXPERIMENTAL)'), _('REV')),
271 270 ] + diffwsopts + walkopts + formatteropts,
272 271 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
273 272 inferrepo=True)
274 273 def annotate(ui, repo, *pats, **opts):
275 274 """show changeset information by line for each file
276 275
277 276 List changes in files, showing the revision id responsible for
278 277 each line.
279 278
280 279 This command is useful for discovering when a change was made and
281 280 by whom.
282 281
283 282 If you include --file, --user, or --date, the revision number is
284 283 suppressed unless you also include --number.
285 284
286 285 Without the -a/--text option, annotate will avoid processing files
287 286 it detects as binary. With -a, annotate will annotate the file
288 287 anyway, although the results will probably be neither useful
289 288 nor desirable.
290 289
291 290 .. container:: verbose
292 291
293 292 Template:
294 293
295 294 The following keywords are supported in addition to the common template
296 295 keywords and functions. See also :hg:`help templates`.
297 296
298 297 :lines: List of lines with annotation data.
299 298 :path: String. Repository-absolute path of the specified file.
300 299
301 300 And each entry of ``{lines}`` provides the following sub-keywords in
302 301 addition to ``{date}``, ``{node}``, ``{rev}``, ``{user}``, etc.
303 302
304 303 :line: String. Line content.
305 304 :lineno: Integer. Line number at that revision.
306 305 :path: String. Repository-absolute path of the file at that revision.
307 306
308 307 See :hg:`help templates.operators` for the list expansion syntax.
309 308
310 309 Returns 0 on success.
311 310 """
312 311 opts = pycompat.byteskwargs(opts)
313 312 if not pats:
314 313 raise error.Abort(_('at least one filename or pattern is required'))
315 314
316 315 if opts.get('follow'):
317 316 # --follow is deprecated and now just an alias for -f/--file
318 317 # to mimic the behavior of Mercurial before version 1.5
319 318 opts['file'] = True
320 319
321 320 rev = opts.get('rev')
322 321 if rev:
323 322 repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
324 323 ctx = scmutil.revsingle(repo, rev)
325 324
326 325 rootfm = ui.formatter('annotate', opts)
327 326 if ui.debugflag:
328 327 shorthex = pycompat.identity
329 328 else:
330 329 def shorthex(h):
331 330 return h[:12]
332 331 if ui.quiet:
333 332 datefunc = dateutil.shortdate
334 333 else:
335 334 datefunc = dateutil.datestr
336 335 if ctx.rev() is None:
337 336 if opts.get('changeset'):
338 337 # omit "+" suffix which is appended to node hex
339 338 def formatrev(rev):
340 339 if rev == wdirrev:
341 340 return '%d' % ctx.p1().rev()
342 341 else:
343 342 return '%d' % rev
344 343 else:
345 344 def formatrev(rev):
346 345 if rev == wdirrev:
347 346 return '%d+' % ctx.p1().rev()
348 347 else:
349 348 return '%d ' % rev
350 349 def formathex(h):
351 350 if h == wdirhex:
352 351 return '%s+' % shorthex(hex(ctx.p1().node()))
353 352 else:
354 353 return '%s ' % shorthex(h)
355 354 else:
356 355 formatrev = b'%d'.__mod__
357 356 formathex = shorthex
358 357
359 358 opmap = [('user', ' ', lambda x: x.fctx.user(), ui.shortuser),
360 359 ('rev', ' ', lambda x: scmutil.intrev(x.fctx), formatrev),
361 360 ('node', ' ', lambda x: hex(scmutil.binnode(x.fctx)), formathex),
362 361 ('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
363 362 ('path', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
364 363 ('lineno', ':', lambda x: x.lineno, pycompat.bytestr),
365 364 ]
366 365 opnamemap = {'rev': 'number', 'node': 'changeset', 'path': 'file',
367 366 'lineno': 'line_number'}
368 367
369 368 if (not opts.get('user') and not opts.get('changeset')
370 369 and not opts.get('date') and not opts.get('file')):
371 370 opts['number'] = True
372 371
373 372 linenumber = opts.get('line_number') is not None
374 373 if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
375 374 raise error.Abort(_('at least one of -n/-c is required for -l'))
376 375
377 376 ui.pager('annotate')
378 377
379 378 if rootfm.isplain():
380 379 def makefunc(get, fmt):
381 380 return lambda x: fmt(get(x))
382 381 else:
383 382 def makefunc(get, fmt):
384 383 return get
385 384 datahint = rootfm.datahint()
386 385 funcmap = [(makefunc(get, fmt), sep) for fn, sep, get, fmt in opmap
387 386 if opts.get(opnamemap.get(fn, fn)) or fn in datahint]
388 387 funcmap[0] = (funcmap[0][0], '') # no separator in front of first column
389 388 fields = ' '.join(fn for fn, sep, get, fmt in opmap
390 389 if opts.get(opnamemap.get(fn, fn)) or fn in datahint)
391 390
392 391 def bad(x, y):
393 392 raise error.Abort("%s: %s" % (x, y))
394 393
395 394 m = scmutil.match(ctx, pats, opts, badfn=bad)
396 395
397 396 follow = not opts.get('no_follow')
398 397 diffopts = patch.difffeatureopts(ui, opts, section='annotate',
399 398 whitespace=True)
400 399 skiprevs = opts.get('skip')
401 400 if skiprevs:
402 401 skiprevs = scmutil.revrange(repo, skiprevs)
403 402
404 403 for abs in ctx.walk(m):
405 404 fctx = ctx[abs]
406 405 rootfm.startitem()
407 406 rootfm.data(path=abs)
408 407 if not opts.get('text') and fctx.isbinary():
409 408 rootfm.plain(_("%s: binary file\n")
410 409 % ((pats and m.rel(abs)) or abs))
411 410 continue
412 411
413 412 fm = rootfm.nested('lines', tmpl='{rev}: {line}')
414 413 lines = fctx.annotate(follow=follow, skiprevs=skiprevs,
415 414 diffopts=diffopts)
416 415 if not lines:
417 416 fm.end()
418 417 continue
419 418 formats = []
420 419 pieces = []
421 420
422 421 for f, sep in funcmap:
423 422 l = [f(n) for n in lines]
424 423 if fm.isplain():
425 424 sizes = [encoding.colwidth(x) for x in l]
426 425 ml = max(sizes)
427 426 formats.append([sep + ' ' * (ml - w) + '%s' for w in sizes])
428 427 else:
429 428 formats.append(['%s' for x in l])
430 429 pieces.append(l)
431 430
432 431 for f, p, n in zip(zip(*formats), zip(*pieces), lines):
433 432 fm.startitem()
434 433 fm.context(fctx=n.fctx)
435 434 fm.write(fields, "".join(f), *p)
436 435 if n.skip:
437 436 fmt = "* %s"
438 437 else:
439 438 fmt = ": %s"
440 439 fm.write('line', fmt, n.text)
441 440
442 441 if not lines[-1].text.endswith('\n'):
443 442 fm.plain('\n')
444 443 fm.end()
445 444
446 445 rootfm.end()
447 446
@command('archive',
    [('', 'no-decode', None, _('do not pass files through decoders')),
     ('p', 'prefix', '', _('directory prefix for files in archive'),
      _('PREFIX')),
     ('r', 'rev', '', _('revision to distribute'), _('REV')),
     ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
    ] + subrepoopts + walkopts,
    _('[OPTION]... DEST'))
def archive(ui, repo, dest, **opts):
    '''create an unversioned archive of a repository revision

    By default, the revision used is the parent of the working
    directory; use -r/--rev to specify a different revision.

    The archive type is automatically detected based on file
    extension (to override, use -t/--type).

    .. container:: verbose

      Examples:

      - create a zip file containing the 1.0 release::

          hg archive -r 1.0 project-1.0.zip

      - create a tarball excluding .hg files::

          hg archive project.tar.gz -X ".hg*"

    Valid types are:

    :``files``: a directory full of files (default)
    :``tar``:   tar archive, uncompressed
    :``tbz2``:  tar archive, compressed using bzip2
    :``tgz``:   tar archive, compressed using gzip
    :``uzip``:  zip archive, uncompressed
    :``zip``:   zip archive, compressed using deflate

    The exact name of the destination archive or directory is given
    using a format string; see :hg:`help export` for details.

    Each member added to an archive file has a directory prefix
    prepended. Use -p/--prefix to specify a format string for the
    prefix. The default is the basename of the archive, with suffixes
    removed.

    Returns 0 on success.
    '''

    opts = pycompat.byteskwargs(opts)
    rev = opts.get('rev')
    if rev:
        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
    ctx = scmutil.revsingle(repo, rev)
    if not ctx:
        raise error.Abort(_('no working directory: please specify a revision'))
    node = ctx.node()
    dest = cmdutil.makefilename(ctx, dest)
    if os.path.realpath(dest) == repo.root:
        raise error.Abort(_('repository root cannot be destination'))

    kind = opts.get('type') or archival.guesskind(dest) or 'files'
    prefix = opts.get('prefix')

    if dest == '-':
        # writing the archive to stdout: only stream formats make sense
        if kind == 'files':
            raise error.Abort(_('cannot archive plain files to stdout'))
        dest = cmdutil.makefileobj(ctx, dest)
        if not prefix:
            prefix = os.path.basename(repo.root) + '-%h'

    prefix = cmdutil.makefilename(ctx, prefix)
    match = scmutil.match(ctx, [], opts)
    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
                     match, prefix, subrepos=opts.get('subrepos'))
523 522
@command('backout',
    [('', 'merge', None, _('merge with old dirstate parent after backout')),
     ('', 'commit', None,
      _('commit if no conflicts were encountered (DEPRECATED)')),
     ('', 'no-commit', None, _('do not commit')),
     ('', 'parent', '',
      _('parent to choose when backing out merge (DEPRECATED)'), _('REV')),
     ('r', 'rev', '', _('revision to backout'), _('REV')),
     ('e', 'edit', False, _('invoke editor on commit messages')),
    ] + mergetoolopts + walkopts + commitopts + commitopts2,
    _('[OPTION]... [-r] REV'))
def backout(ui, repo, node=None, rev=None, **opts):
    '''reverse effect of earlier changeset

    Prepare a new changeset with the effect of REV undone in the
    current working directory. If no conflicts were encountered,
    it will be committed immediately.

    If REV is the parent of the working directory, then this new changeset
    is committed automatically (unless --no-commit is specified).

    .. note::

       :hg:`backout` cannot be used to fix either an unwanted or
       incorrect merge.

    .. container:: verbose

      Examples:

      - Reverse the effect of the parent of the working directory.
        This backout will be committed immediately::

          hg backout -r .

      - Reverse the effect of previous bad revision 23::

          hg backout -r 23

      - Reverse the effect of previous bad revision 23 and
        leave changes uncommitted::

          hg backout -r 23 --no-commit
          hg commit -m "Backout revision 23"

      By default, the pending changeset will have one parent,
      maintaining a linear history. With --merge, the pending
      changeset will instead have two parents: the old parent of the
      working directory and a new child of REV that simply undoes REV.

      Before version 1.7, the behavior without --merge was equivalent
      to specifying --merge followed by :hg:`update --clean .` to
      cancel the merge and leave the child of REV as a head to be
      merged separately.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    See :hg:`help revert` for a way to restore files to the state
    of another revision.

    Returns 0 on success, 1 if nothing to backout or there are unresolved
    files.
    '''
    # All the real work happens in _dobackout(); acquire both the
    # working-directory lock and the store lock up front so the repo
    # cannot change underneath the backout.
    with repo.wlock(), repo.lock():
        return _dobackout(ui, repo, node, rev, **opts)
589 588
def _dobackout(ui, repo, node=None, rev=None, **opts):
    """Worker for `hg backout`; expects wlock and lock to already be held.

    Returns 0 on success, 1 if nothing changed or there are unresolved
    merge conflicts. Raises error.Abort for invalid option combinations
    or when REV cannot be backed out.
    """
    opts = pycompat.byteskwargs(opts)
    if opts.get('commit') and opts.get('no_commit'):
        raise error.Abort(_("cannot use --commit with --no-commit"))
    if opts.get('merge') and opts.get('no_commit'):
        raise error.Abort(_("cannot use --merge with --no-commit"))

    if rev and node:
        raise error.Abort(_("please specify just one revision"))

    if not rev:
        rev = node

    if not rev:
        raise error.Abort(_("please specify a revision to backout"))

    date = opts.get('date')
    if date:
        opts['date'] = dateutil.parsedate(date)

    cmdutil.checkunfinished(repo)
    cmdutil.bailifchanged(repo)
    node = scmutil.revsingle(repo, rev).node()

    op1, op2 = repo.dirstate.parents()
    if not repo.changelog.isancestor(node, op1):
        raise error.Abort(_('cannot backout change that is not an ancestor'))

    p1, p2 = repo.changelog.parents(node)
    if p1 == nullid:
        raise error.Abort(_('cannot backout a change with no parents'))
    if p2 != nullid:
        # backing out a merge requires the user to pick which parent
        # to back out towards (a DEPRECATED but still supported option)
        if not opts.get('parent'):
            raise error.Abort(_('cannot backout a merge changeset'))
        p = repo.lookup(opts['parent'])
        if p not in (p1, p2):
            raise error.Abort(_('%s is not a parent of %s') %
                              (short(p), short(node)))
        parent = p
    else:
        if opts.get('parent'):
            raise error.Abort(_('cannot use --parent on non-merge changeset'))
        parent = p1

    # the backout should appear on the same branch
    branch = repo.dirstate.branch()
    bheads = repo.branchheads(branch)
    rctx = scmutil.revsingle(repo, hex(parent))
    if not opts.get('merge') and op1 != node:
        with dirstateguard.dirstateguard(repo, 'backout'):
            overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
            with ui.configoverride(overrides, 'backout'):
                stats = mergemod.update(repo, parent, True, True, node, False)
            # keep the original working-directory parents: the merge above
            # was only a mechanism to apply the reverse delta
            repo.setparents(op1, op2)
        hg._showstats(repo, stats)
        if stats.unresolvedcount:
            repo.ui.status(_("use 'hg resolve' to retry unresolved "
                             "file merges\n"))
            return 1
    else:
        hg.clean(repo, node, show_stats=False)
        repo.dirstate.setbranch(branch)
        cmdutil.revert(ui, repo, rctx, repo.dirstate.parents())

    if opts.get('no_commit'):
        msg = _("changeset %s backed out, "
                "don't forget to commit.\n")
        ui.status(msg % short(node))
        return 0

    def commitfunc(ui, repo, message, match, opts):
        # commit callback; supplies a default message and editor
        editform = 'backout'
        e = cmdutil.getcommiteditor(editform=editform,
                                    **pycompat.strkwargs(opts))
        if not message:
            # we don't translate commit messages
            message = "Backed out changeset %s" % short(node)
            e = cmdutil.getcommiteditor(edit=True, editform=editform)
        return repo.commit(message, opts.get('user'), opts.get('date'),
                           match, editor=e)
    newnode = cmdutil.commit(ui, repo, commitfunc, [], opts)
    if not newnode:
        ui.status(_("nothing changed\n"))
        return 1
    cmdutil.commitstatus(repo, newnode, branch, bheads)

    def nice(node):
        return '%d:%s' % (repo.changelog.rev(node), short(node))
    ui.status(_('changeset %s backs out changeset %s\n') %
              (nice(repo.changelog.tip()), nice(node)))
    if opts.get('merge') and op1 != node:
        hg.clean(repo, op1, show_stats=False)
        ui.status(_('merging with changeset %s\n')
                  % nice(repo.changelog.tip()))
        overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
        with ui.configoverride(overrides, 'backout'):
            return hg.merge(repo, hex(repo.changelog.tip()))
    return 0
688 687
@command('bisect',
    [('r', 'reset', False, _('reset bisect state')),
     ('g', 'good', False, _('mark changeset good')),
     ('b', 'bad', False, _('mark changeset bad')),
     ('s', 'skip', False, _('skip testing changeset')),
     ('e', 'extend', False, _('extend the bisect range')),
     ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
     ('U', 'noupdate', False, _('do not update to target'))],
    _("[-gbsr] [-U] [-c CMD] [REV]"))
def bisect(ui, repo, rev=None, extra=None, command=None,
           reset=None, good=None, bad=None, skip=None, extend=None,
           noupdate=None):
    """subdivision search of changesets

    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.

    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.

    If you supply a command, it will be used for automatic bisection.
    The environment variable HG_NODE will contain the ID of the
    changeset being tested. The exit status of the command will be
    used to mark revisions as good or bad: status 0 means good, 125
    means to skip the revision, 127 (command not found) will abort the
    bisection, and any other non-zero exit status means the revision
    is bad.

    .. container:: verbose

      Some examples:

      - start a bisection with known bad revision 34, and good revision 12::

          hg bisect --bad 34
          hg bisect --good 12

      - advance the current bisection by marking current revision as good or
        bad::

          hg bisect --good
          hg bisect --bad

      - mark the current revision, or a known revision, to be skipped (e.g. if
        that revision is not usable because of another issue)::

          hg bisect --skip
          hg bisect --skip 23

      - skip all revisions that do not touch directories ``foo`` or ``bar``::

          hg bisect --skip "!( file('path:foo') & file('path:bar') )"

      - forget the current bisection::

          hg bisect --reset

      - use 'make && make tests' to automatically find the first broken
        revision::

          hg bisect --reset
          hg bisect --bad 34
          hg bisect --good 12
          hg bisect --command "make && make tests"

      - see all changesets whose states are already known in the current
        bisection::

          hg log -r "bisect(pruned)"

      - see the changeset currently being bisected (especially useful
        if running with -U/--noupdate)::

          hg log -r "bisect(current)"

      - see all changesets that took part in the current bisection::

          hg log -r "bisect(range)"

      - you can even get a nice graph::

          hg log --graph -r "bisect(range)"

      See :hg:`help revisions.bisect` for more about the `bisect()` predicate.

    Returns 0 on success.
    """
    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra:
        raise error.Abort(_('incompatible arguments'))

    incompatibles = {
        '--bad': bad,
        '--command': bool(command),
        '--extend': extend,
        '--good': good,
        '--reset': reset,
        '--skip': skip,
    }

    enabled = [x for x in incompatibles if incompatibles[x]]

    if len(enabled) > 1:
        raise error.Abort(_('%s and %s are incompatible') %
                          tuple(sorted(enabled)[0:2]))

    if reset:
        hbisect.resetstate(repo)
        return

    state = hbisect.load_state(repo)

    # update state
    if good or bad or skip:
        if rev:
            nodes = [repo[i].node() for i in scmutil.revrange(repo, [rev])]
        else:
            nodes = [repo.lookup('.')]
        if good:
            state['good'] += nodes
        elif bad:
            state['bad'] += nodes
        elif skip:
            state['skip'] += nodes
        hbisect.save_state(repo, state)
        if not (state['good'] and state['bad']):
            return

    def mayupdate(repo, node, show_stats=True):
        """common used update sequence"""
        if noupdate:
            return
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)
        return hg.clean(repo, node, show_stats=show_stats)

    displayer = logcmdutil.changesetdisplayer(ui, repo, {})

    if command:
        # automatic bisection driven by the user-supplied command
        changesets = 1
        if noupdate:
            try:
                node = state['current'][0]
            except LookupError:
                raise error.Abort(_('current bisect revision is unknown - '
                                    'start a new bisect to fix'))
        else:
            node, p2 = repo.dirstate.parents()
            if p2 != nullid:
                raise error.Abort(_('current bisect revision is a merge'))
        if rev:
            node = repo[scmutil.revsingle(repo, rev, node)].node()
        try:
            while changesets:
                # update state
                state['current'] = [node]
                hbisect.save_state(repo, state)
                status = ui.system(command, environ={'HG_NODE': hex(node)},
                                   blockedtag='bisect_check')
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise error.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise error.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                state[transition].append(node)
                ctx = repo[node]
                ui.status(_('changeset %d:%s: %s\n') % (ctx.rev(), ctx,
                                                        transition))
                hbisect.checkstate(state)
                # bisect
                nodes, changesets, bgood = hbisect.bisect(repo, state)
                # update to next check
                node = nodes[0]
                mayupdate(repo, node, show_stats=False)
        finally:
            # always record where the bisection stopped
            state['current'] = [node]
            hbisect.save_state(repo, state)
        hbisect.printresult(ui, repo, state, displayer, nodes, bgood)
        return

    hbisect.checkstate(state)

    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo, state)
    if extend:
        if not changesets:
            extendnode = hbisect.extendrange(repo, state, nodes, good)
            if extendnode is not None:
                ui.write(_("Extending search to changeset %d:%s\n")
                         % (extendnode.rev(), extendnode))
                state['current'] = [extendnode.node()]
                hbisect.save_state(repo, state)
                return mayupdate(repo, extendnode.node())
        raise error.Abort(_("nothing to extend"))

    if changesets == 0:
        hbisect.printresult(ui, repo, state, displayer, nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        state['current'] = [node]
        hbisect.save_state(repo, state)
        return mayupdate(repo, node)
921 920
@command('bookmarks|bookmark',
    [('f', 'force', False, _('force')),
     ('r', 'rev', '', _('revision for bookmark action'), _('REV')),
     ('d', 'delete', False, _('delete a given bookmark')),
     ('m', 'rename', '', _('rename a given bookmark'), _('OLD')),
     ('i', 'inactive', False, _('mark a bookmark inactive')),
     ('l', 'list', False, _('list existing bookmarks')),
    ] + formatteropts,
    _('hg bookmarks [OPTIONS]... [NAME]...'))
def bookmark(ui, repo, *names, **opts):
    '''create a new bookmark or list existing bookmarks

    Bookmarks are labels on changesets to help track lines of development.
    Bookmarks are unversioned and can be moved, renamed and deleted.
    Deleting or moving a bookmark has no effect on the associated changesets.

    Creating or updating to a bookmark causes it to be marked as 'active'.
    The active bookmark is indicated with a '*'.
    When a commit is made, the active bookmark will advance to the new commit.
    A plain :hg:`update` will also advance an active bookmark, if possible.
    Updating away from a bookmark will cause it to be deactivated.

    Bookmarks can be pushed and pulled between repositories (see
    :hg:`help push` and :hg:`help pull`). If a shared bookmark has
    diverged, a new 'divergent bookmark' of the form 'name@path' will
    be created. Using :hg:`merge` will resolve the divergence.

    Specifying bookmark as '.' to -m/-d/-l options is equivalent to specifying
    the active bookmark's name.

    A bookmark named '@' has the special property that :hg:`clone` will
    check it out by default if it exists.

    .. container:: verbose

      Template:

      The following keywords are supported in addition to the common template
      keywords and functions such as ``{bookmark}``. See also
      :hg:`help templates`.

      :active:  Boolean. True if the bookmark is active.

      Examples:

      - create an active bookmark for a new line of development::

          hg book new-feature

      - create an inactive bookmark as a place marker::

          hg book -i reviewed

      - create an inactive bookmark on another changeset::

          hg book -r .^ tested

      - rename bookmark turkey to dinner::

          hg book -m turkey dinner

      - move the '@' bookmark from another branch::

          hg book -f @

      - print only the active bookmark name::

          hg book -ql .
    '''
    opts = pycompat.byteskwargs(opts)
    force = opts.get('force')
    rev = opts.get('rev')
    inactive = opts.get('inactive') # meaning add/rename to inactive bookmark

    # exactly one of the mutually-exclusive actions may be selected
    selactions = [k for k in ['delete', 'rename', 'list'] if opts.get(k)]
    if len(selactions) > 1:
        raise error.Abort(_('--%s and --%s are incompatible')
                          % tuple(selactions[:2]))
    if selactions:
        action = selactions[0]
    elif names or rev:
        action = 'add'
    elif inactive:
        action = 'inactive' # meaning deactivate
    else:
        action = 'list'

    if rev and action in {'delete', 'rename', 'list'}:
        raise error.Abort(_("--rev is incompatible with --%s") % action)
    if inactive and action in {'delete', 'list'}:
        raise error.Abort(_("--inactive is incompatible with --%s") % action)
    if not names and action in {'add', 'delete'}:
        raise error.Abort(_("bookmark name required"))

    if action in {'add', 'delete', 'rename', 'inactive'}:
        # mutating actions need locks and a transaction
        with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
            if action == 'delete':
                names = pycompat.maplist(repo._bookmarks.expandname, names)
                bookmarks.delete(repo, tr, names)
            elif action == 'rename':
                if not names:
                    raise error.Abort(_("new bookmark name required"))
                elif len(names) > 1:
                    raise error.Abort(_("only one new bookmark name allowed"))
                oldname = repo._bookmarks.expandname(opts['rename'])
                bookmarks.rename(repo, tr, oldname, names[0], force, inactive)
            elif action == 'add':
                bookmarks.addbookmarks(repo, tr, names, rev, force, inactive)
            elif action == 'inactive':
                if len(repo._bookmarks) == 0:
                    ui.status(_("no bookmarks set\n"))
                elif not repo._activebookmark:
                    ui.status(_("no active bookmark\n"))
                else:
                    bookmarks.deactivate(repo)
    elif action == 'list':
        names = pycompat.maplist(repo._bookmarks.expandname, names)
        with ui.formatter('bookmarks', opts) as fm:
            bookmarks.printbookmarks(ui, repo, fm, names)
    else:
        raise error.ProgrammingError('invalid action: %s' % action)
1043 1042
@command('branch',
    [('f', 'force', None,
      _('set branch name even if it shadows an existing branch')),
     ('C', 'clean', None, _('reset branch name to parent branch name')),
     ('r', 'rev', [], _('change branches of the given revs (EXPERIMENTAL)')),
    ],
    _('[-fC] [NAME]'))
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    .. note::

       Branch names are permanent and global. Use :hg:`bookmark` to create a
       light-weight bookmark instead. See :hg:`help glossary` for more
       information about named branches and bookmarks.

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch will not exist
    in the repository until the next commit). Standard practice
    recommends that primary development take place on the 'default'
    branch.

    Unless -f/--force is specified, branch will not let you set a
    branch name that already exists.

    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.

    Use the command :hg:`update` to switch to an existing branch. Use
    :hg:`commit --close-branch` to mark this branch head as closed.
    When all heads of a branch are closed, the branch will be
    considered closed.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    revs = opts.get('rev')
    if label:
        label = label.strip()

    if not opts.get('clean') and not label:
        # no argument: just report the current branch name
        if revs:
            raise error.Abort(_("no branch name specified for the revisions"))
        ui.write("%s\n" % repo.dirstate.branch())
        return

    with repo.wlock():
        if opts.get('clean'):
            label = repo[None].p1().branch()
            repo.dirstate.setbranch(label)
            ui.status(_('reset working directory to branch %s\n') % label)
        elif label:
            scmutil.checknewlabel(repo, label, 'branch')
            if revs:
                # EXPERIMENTAL: rewrite the branch of existing revisions
                return cmdutil.changebranch(ui, repo, revs, label)

            if not opts.get('force') and label in repo.branchmap():
                if label not in [p.branch() for p in repo[None].parents()]:
                    raise error.Abort(_('a branch of the same name already'
                                        ' exists'),
                                      # i18n: "it" refers to an existing branch
                                      hint=_("use 'hg update' to switch to it"))

            repo.dirstate.setbranch(label)
            ui.status(_('marked working directory as branch %s\n') % label)

            # find any open named branches aside from default
            others = [n for n, h, t, c in repo.branchmap().iterbranches()
                      if n != "default" and not c]
            if not others:
                ui.status(_('(branches are permanent and global, '
                            'did you want a bookmark?)\n'))
1118 1117
@command('branches',
    [('a', 'active', False,
      _('show only branches that have unmerged heads (DEPRECATED)')),
     ('c', 'closed', False, _('show normal and closed branches')),
    ] + formatteropts,
    _('[-c]'),
    intents={INTENT_READONLY})
def branches(ui, repo, active=False, closed=False, **opts):
    """list repository named branches

    List the repository's named branches, indicating which ones are
    inactive. If -c/--closed is specified, also list branches which have
    been marked closed (see :hg:`commit --close-branch`).

    Use the command :hg:`update` to switch to an existing branch.

    .. container:: verbose

      Template:

      The following keywords are supported in addition to the common template
      keywords and functions such as ``{branch}``. See also
      :hg:`help templates`.

      :active:  Boolean. True if the branch is active.
      :closed:  Boolean. True if the branch is closed.
      :current: Boolean. True if it is the current branch.

    Returns 0.
    """

    opts = pycompat.byteskwargs(opts)
    ui.pager('branches')
    fm = ui.formatter('branches', opts)
    hexfunc = fm.hexfunc

    allheads = set(repo.heads())
    branches = []
    for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
        isactive = False
        if not isclosed:
            # a branch is active if any of its open heads is a repo head
            openheads = set(repo.branchmap().iteropen(heads))
            isactive = bool(openheads & allheads)
        branches.append((tag, repo[tip], isactive, not isclosed))
    branches.sort(key=lambda i: (i[2], i[1].rev(), i[0], i[3]),
                  reverse=True)

    for tag, ctx, isactive, isopen in branches:
        if active and not isactive:
            continue
        if isactive:
            label = 'branches.active'
            notice = ''
        elif not isopen:
            if not closed:
                continue
            label = 'branches.closed'
            notice = _(' (closed)')
        else:
            label = 'branches.inactive'
            notice = _(' (inactive)')
        current = (tag == repo.dirstate.branch())
        if current:
            label = 'branches.current'

        fm.startitem()
        fm.write('branch', '%s', tag, label=label)
        rev = ctx.rev()
        padsize = max(31 - len("%d" % rev) - encoding.colwidth(tag), 0)
        fmt = ' ' * padsize + ' %d:%s'
        fm.condwrite(not ui.quiet, 'rev node', fmt, rev, hexfunc(ctx.node()),
                     label='log.changeset changeset.%s' % ctx.phasestr())
        fm.context(ctx=ctx)
        fm.data(active=isactive, closed=not isopen, current=current)
        if not ui.quiet:
            fm.plain(notice)
        fm.plain('\n')
    fm.end()
1197 1196
@command('bundle',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [], _('a changeset intended to be added to the destination'),
     _('REV')),
    ('b', 'branch', [], _('a specific branch you would like to bundle'),
     _('BRANCH')),
    ('', 'base', [],
     _('a base changeset assumed to be available at the destination'),
     _('REV')),
    ('a', 'all', None, _('bundle all changesets in the repository')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
    ] + remoteopts,
    _('[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
def bundle(ui, repo, fname, dest=None, **opts):
    """create a bundle file

    Generate a bundle file containing data to be transferred to another
    repository.

    To create a bundle containing all changesets, use -a/--all
    (or --base null). Otherwise, hg assumes the destination will have
    all the nodes you specify with --base parameters. Otherwise, hg
    will assume the repository has all the nodes in destination, or
    default-push/default if no destination is specified, where destination
    is the repository you provide through DEST option.

    You can change bundle format with the -t/--type option. See
    :hg:`help bundlespec` for documentation on this format. By default,
    the most appropriate format is used and compression defaults to
    bzip2.

    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.

    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.

    Returns 0 on success, 1 if no changes found.
    """
    opts = pycompat.byteskwargs(opts)
    revs = None
    if 'rev' in opts:
        revstrings = opts['rev']
        revs = scmutil.revrange(repo, revstrings)
        # --rev was given but resolved to nothing: refuse to write an
        # empty bundle rather than silently succeeding.
        if revstrings and not revs:
            raise error.Abort(_('no commits to bundle'))

    bundletype = opts.get('type', 'bzip2').lower()
    try:
        bundlespec = exchange.parsebundlespec(repo, bundletype, strict=False)
    except error.UnsupportedBundleSpecification as e:
        raise error.Abort(pycompat.bytestr(e),
                          hint=_("see 'hg help bundlespec' for supported "
                                 "values for --type"))
    cgversion = bundlespec.contentopts["cg.version"]

    # Packed bundles are a pseudo bundle format for now.
    if cgversion == 's1':
        raise error.Abort(_('packed bundles cannot be produced by "hg bundle"'),
                          hint=_("use 'hg debugcreatestreamclonebundle'"))

    if opts.get('all'):
        # --all means "everything relative to the null revision"; it is
        # mutually exclusive with DEST and overrides --base.
        if dest:
            raise error.Abort(_("--all is incompatible with specifying "
                                "a destination"))
        if opts.get('base'):
            ui.warn(_("ignoring --base because --all was specified\n"))
        base = [nullrev]
    else:
        base = scmutil.revrange(repo, opts.get('base'))
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        raise error.Abort(_("repository does not support bundle version %s") %
                          cgversion)

    if base:
        # Explicit --base: the caller asserts what the destination has,
        # so no remote discovery is performed.
        if dest:
            raise error.Abort(_("--base is incompatible with specifying "
                                "a destination"))
        common = [repo[rev].node() for rev in base]
        heads = [repo[r].node() for r in revs] if revs else None
        outgoing = discovery.outgoing(repo, common, heads)
    else:
        # No --base: contact DEST (or default-push/default) and discover
        # which changesets it is missing.
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.peer(repo, opts, dest)
        revs = [repo[r].hex() for r in revs]
        revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
        heads = revs and pycompat.maplist(repo.lookup, revs) or revs
        outgoing = discovery.findcommonoutgoing(repo, other,
                                                onlyheads=heads,
                                                force=opts.get('force'),
                                                portable=True)

    if not outgoing.missing:
        # Nothing to bundle; report and exit with status 1.
        scmutil.nochangesfound(ui, repo, not base and outgoing.excluded)
        return 1

    # Map the changegroup version to the on-the-wire bundle header.
    if cgversion == '01': #bundle1
        bversion = 'HG10' + bundlespec.wirecompression
        bcompression = None
    elif cgversion in ('02', '03'):
        bversion = 'HG20'
        bcompression = bundlespec.wirecompression
    else:
        raise error.ProgrammingError(
            'bundle: unexpected changegroup version %s' % cgversion)

    # TODO compression options should be derived from bundlespec parsing.
    # This is a temporary hack to allow adjusting bundle compression
    # level without a) formalizing the bundlespec changes to declare it
    # b) introducing a command flag.
    compopts = {}
    complevel = ui.configint('experimental',
                             'bundlecomplevel.' + bundlespec.compression)
    if complevel is None:
        complevel = ui.configint('experimental', 'bundlecomplevel')
    if complevel is not None:
        compopts['level'] = complevel

    # Allow overriding the bundling of obsmarker in phases through
    # configuration while we don't have a bundle version that include them
    if repo.ui.configbool('experimental', 'evolution.bundle-obsmarker'):
        bundlespec.contentopts['obsolescence'] = True
    if repo.ui.configbool('experimental', 'bundle-phases'):
        bundlespec.contentopts['phases'] = True

    bundle2.writenewbundle(ui, repo, 'bundle', fname, bversion, outgoing,
                           bundlespec.contentopts, compression=bcompression,
                           compopts=compopts)
1329 1328
@command('cat',
    [('o', 'output', '',
     _('print output to file with formatted name'), _('FORMAT')),
    ('r', 'rev', '', _('print the given revision'), _('REV')),
    ('', 'decode', None, _('apply any matching decode filter')),
    ] + walkopts + formatteropts,
    _('[OPTION]... FILE...'),
    inferrepo=True,
    intents={INTENT_READONLY})
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files

    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used.

    Output may be to a file, in which case the name of the file is
    given using a template string. See :hg:`help templates`. In addition
    to the common template keywords, the following formatting rules are
    supported:

    :``%%``: literal "%" character
    :``%s``: basename of file being printed
    :``%d``: dirname of file being printed, or '.' if in repository root
    :``%p``: root-relative path name of file being printed
    :``%H``: changeset hash (40 hexadecimal digits)
    :``%R``: changeset revision number
    :``%h``: short-form changeset hash (12 hexadecimal digits)
    :``%r``: zero-padded changeset revision number
    :``%b``: basename of the exporting repository
    :``\\``: literal "\\" character

    .. container:: verbose

      Template:

      The following keywords are supported in addition to the common template
      keywords and functions. See also :hg:`help templates`.

      :data: String. File content.
      :path: String. Repository-absolute path of the file.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    rev = opts.get('rev')
    if rev:
        # Allow hash-like revisions of hidden changesets without a warning.
        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
    ctx = scmutil.revsingle(repo, rev)
    m = scmutil.match(ctx, (file1,) + pats, opts)
    fntemplate = opts.pop('output', '')
    # '-o -' (stdout) is treated the same as no --output at all.
    if cmdutil.isstdiofilename(fntemplate):
        fntemplate = ''

    if fntemplate:
        # Writing to named files: no formatter output, so use a null one.
        fm = formatter.nullformatter(ui, 'cat', opts)
    else:
        ui.pager('cat')
        fm = ui.formatter('cat', opts)
    with fm:
        return cmdutil.cat(ui, repo, ctx, m, fm, fntemplate, '',
                           **pycompat.strkwargs(opts))
1391 1390
@command('^clone',
    [('U', 'noupdate', None, _('the clone will include an empty working '
                               'directory (only a repository)')),
    ('u', 'updaterev', '', _('revision, tag, or branch to check out'),
     _('REV')),
    ('r', 'rev', [], _('do not clone everything, but include this changeset'
                       ' and its ancestors'), _('REV')),
    ('b', 'branch', [], _('do not clone everything, but include this branch\'s'
                          ' changesets and their ancestors'), _('BRANCH')),
    ('', 'pull', None, _('use pull protocol to copy metadata')),
    ('', 'uncompressed', None,
     _('an alias to --stream (DEPRECATED)')),
    ('', 'stream', None,
     _('clone with minimal data processing')),
    ] + remoteopts,
    _('[OPTION]... SOURCE [DEST]'),
    norepo=True)
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    ``.hg/hgrc`` file, as the default to be used for future pulls.

    Only local paths and ``ssh://`` URLs are supported as
    destinations. For ``ssh://`` destinations, no working directory or
    ``.hg/hgrc`` will be created on the remote side.

    If the source repository has a bookmark called '@' set, that
    revision will be checked out in the new repository by default.

    To check out a particular version, use -u/--update, or
    -U/--noupdate to create a clone with no working directory.

    To pull only a subset of changesets, specify one or more revisions
    identifiers with -r/--rev or branches with -b/--branch. The
    resulting clone will contain only the specified changesets and
    their ancestors. These options (or 'clone src#rev dest') imply
    --pull, even for local source repositories.

    In normal clone mode, the remote normalizes repository data into a common
    exchange format and the receiving end translates this data into its local
    storage format. --stream activates a different clone mode that essentially
    copies repository files from the remote with minimal data processing. This
    significantly reduces the CPU cost of a clone both remotely and locally.
    However, it often increases the transferred data size by 30-40%. This can
    result in substantially faster clones where I/O throughput is plentiful,
    especially for larger repositories. A side-effect of --stream clones is
    that storage settings and requirements on the remote are applied locally:
    a modern client may inherit legacy or inefficient storage used by the
    remote or a legacy Mercurial client may not be able to clone from a
    modern Mercurial remote.

    .. note::

       Specifying a tag will include the tagged changeset but not the
       changeset containing the tag.

    .. container:: verbose

      For efficiency, hardlinks are used for cloning whenever the
      source and destination are on the same filesystem (note this
      applies only to the repository data, not to the working
      directory). Some filesystems, such as AFS, implement hardlinking
      incorrectly, but do not report errors. In these cases, use the
      --pull option to avoid hardlinking.

      Mercurial will update the working directory to the first applicable
      revision from this list:

      a) null if -U or the source repository has no changesets
      b) if -u . and the source repository is local, the first parent of
         the source repository's working directory
      c) the changeset specified with -u (if a branch name, this means the
         latest head of that branch)
      d) the changeset specified with -r
      e) the tipmost head specified with -b
      f) the tipmost head specified with the url#branch source syntax
      g) the revision marked with the '@' bookmark, if present
      h) the tipmost head of the default branch
      i) tip

      When cloning from servers that support it, Mercurial may fetch
      pre-generated data from a server-advertised URL or inline from the
      same stream. When this is done, hooks operating on incoming changesets
      and changegroups may fire more than once, once for each pre-generated
      bundle and as well as for any additional remaining data. In addition,
      if an error occurs, the repository may be rolled back to a partial
      clone. This behavior may change in future releases.
      See :hg:`help -e clonebundles` for more.

      Examples:

      - clone a remote repository to a new directory named hg/::

          hg clone https://www.mercurial-scm.org/repo/hg/

      - create a lightweight local clone::

          hg clone project/ project-feature/

      - clone from an absolute path on an ssh server (note double-slash)::

          hg clone ssh://user@server//home/projects/alpha/

      - do a streaming clone while checking out a specified version::

          hg clone --stream http://server/repo -u 1.5

      - create a repository without changesets after a particular revision::

          hg clone -r 04e544 experimental/ good/

      - clone (and track) a particular named branch::

          hg clone https://www.mercurial-scm.org/repo/hg/#stable

    See :hg:`help urls` for details on specifying URLs.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if opts.get('noupdate') and opts.get('updaterev'):
        raise error.Abort(_("cannot specify both --noupdate and --updaterev"))

    # --include/--exclude can come from narrow or sparse.
    includepats, excludepats = None, None

    # hg.clone() differentiates between None and an empty set. So make sure
    # patterns are sets if narrow is requested without patterns.
    if opts.get('narrow'):
        includepats = set()
        excludepats = set()

        if opts.get('include'):
            includepats = narrowspec.parsepatterns(opts.get('include'))
        if opts.get('exclude'):
            excludepats = narrowspec.parsepatterns(opts.get('exclude'))

    r = hg.clone(ui, opts, source, dest,
                 pull=opts.get('pull'),
                 stream=opts.get('stream') or opts.get('uncompressed'),
                 revs=opts.get('rev'),
                 update=opts.get('updaterev') or not opts.get('noupdate'),
                 branch=opts.get('branch'),
                 shareopts=opts.get('shareopts'),
                 storeincludepats=includepats,
                 storeexcludepats=excludepats)

    # hg.clone() returns None on success; map that to exit status 0.
    return r is None
1546 1545
@command('^commit|ci',
    [('A', 'addremove', None,
     _('mark new/missing files as added/removed before committing')),
    ('', 'close-branch', None,
     _('mark a branch head as closed')),
    ('', 'amend', None, _('amend the parent of the working directory')),
    ('s', 'secret', None, _('use the secret phase for committing')),
    ('e', 'edit', None, _('invoke editor on commit messages')),
    ('i', 'interactive', None, _('use interactive mode')),
    ] + walkopts + commitopts + commitopts2 + subrepoopts,
    _('[OPTION]... [FILE]...'),
    inferrepo=True)
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository. Unlike a
    centralized SCM, this operation is a local operation. See
    :hg:`push` for a way to actively distribute your changes.

    If a list of files is omitted, all changes reported by :hg:`status`
    will be committed.

    If you are committing the result of a merge, do not provide any
    filenames or -I/-X filters.

    If no commit message is specified, Mercurial starts your
    configured editor where you can enter a message. In case your
    commit fails, you will find a backup of your message in
    ``.hg/last-message.txt``.

    The --close-branch flag can be used to mark the current branch
    head closed. When all heads of a branch are closed, the branch
    will be considered closed and no longer listed.

    The --amend flag can be used to amend the parent of the
    working directory with a new commit that contains the changes
    in the parent in addition to those currently reported by :hg:`status`,
    if there are any. The old commit is stored in a backup bundle in
    ``.hg/strip-backup`` (see :hg:`help bundle` and :hg:`help unbundle`
    on how to restore it).

    Message, user and date are taken from the amended commit unless
    specified. When a message isn't specified on the command line,
    the editor will open with the message of the amended commit.

    It is not possible to amend public changesets (see :hg:`help phases`)
    or changesets that have children.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if nothing changed.

    .. container:: verbose

      Examples:

      - commit all files ending in .py::

          hg commit --include "set:**.py"

      - commit all non-binary files::

          hg commit --exclude "set:binary()"

      - amend the current commit and set the date to now::

          hg commit --amend --date now
    """
    # Take both the working-directory lock and the store lock for the
    # whole operation; the actual work happens in _docommit().
    with repo.wlock(), repo.lock():
        return _docommit(ui, repo, *pats, **opts)
1617 1616
def _docommit(ui, repo, *pats, **opts):
    """Implementation of :hg:`commit`; called with wlock and lock held.

    Returns 0/None on success and 1 when nothing changed, mirroring the
    command's documented exit codes.
    """
    if opts.get(r'interactive'):
        # Interactive mode delegates to the record machinery, which calls
        # back into commit() for the actual commit.
        opts.pop(r'interactive')
        ret = cmdutil.dorecord(ui, repo, commit, None, False,
                               cmdutil.recordfilter, *pats,
                               **opts)
        # ret can be 0 (no changes to record) or the value returned by
        # commit(), 1 if nothing changed or None on success.
        return 1 if ret == 0 else ret

    opts = pycompat.byteskwargs(opts)
    if opts.get('subrepos'):
        if opts.get('amend'):
            raise error.Abort(_('cannot amend with --subrepos'))
        # Let --subrepos on the command line override config setting.
        ui.setconfig('ui', 'commitsubrepos', True, 'commit')

    cmdutil.checkunfinished(repo, commit=True)

    branch = repo[None].branch()
    bheads = repo.branchheads(branch)

    extra = {}
    if opts.get('close_branch'):
        extra['close'] = '1'

        # Only branch heads may be closed; for --amend, check the branch
        # of the commit being amended via the working parents instead.
        if not bheads:
            raise error.Abort(_('can only close branch heads'))
        elif opts.get('amend'):
            if repo[None].parents()[0].p1().branch() != branch and \
                    repo[None].parents()[0].p2().branch() != branch:
                raise error.Abort(_('can only close branch heads'))

    if opts.get('amend'):
        if ui.configbool('ui', 'commitsubrepos'):
            raise error.Abort(_('cannot amend with ui.commitsubrepos enabled'))

        old = repo['.']
        rewriteutil.precheck(repo, [old.rev()], 'amend')

        # Currently histedit gets confused if an amend happens while histedit
        # is in progress. Since we have a checkunfinished command, we are
        # temporarily honoring it.
        #
        # Note: eventually this guard will be removed. Please do not expect
        # this behavior to remain.
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            cmdutil.checkunfinished(repo)

        node = cmdutil.amend(ui, repo, old, extra, pats, opts)
        if node == old.node():
            ui.status(_("nothing changed\n"))
            return 1
    else:
        def commitfunc(ui, repo, message, match, opts):
            # --secret is implemented as a temporary override of the
            # phases.new-commit config on both baseui and ui.
            overrides = {}
            if opts.get('secret'):
                overrides[('phases', 'new-commit')] = 'secret'

            baseui = repo.baseui
            with baseui.configoverride(overrides, 'commit'):
                with ui.configoverride(overrides, 'commit'):
                    editform = cmdutil.mergeeditform(repo[None],
                                                     'commit.normal')
                    editor = cmdutil.getcommiteditor(
                        editform=editform, **pycompat.strkwargs(opts))
                    return repo.commit(message,
                                       opts.get('user'),
                                       opts.get('date'),
                                       match,
                                       editor=editor,
                                       extra=extra)

        node = cmdutil.commit(ui, repo, commitfunc, pats, opts)

        if not node:
            # Nothing was committed; mention missing files if that is why.
            stat = cmdutil.postcommitstatus(repo, pats, opts)
            if stat[3]:
                ui.status(_("nothing changed (%d missing files, see "
                            "'hg status')\n") % len(stat[3]))
            else:
                ui.status(_("nothing changed\n"))
            return 1

    cmdutil.commitstatus(repo, node, branch, bheads, opts)
1703 1702
@command('config|showconfig|debugconfig',
    [('u', 'untrusted', None, _('show untrusted configuration options')),
     ('e', 'edit', None, _('edit user config')),
     ('l', 'local', None, _('edit repository config')),
     ('g', 'global', None, _('edit global config'))] + formatteropts,
    _('[-u] [NAME]...'),
    optionalrepo=True,
    intents={INTENT_READONLY})
def config(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no arguments, print names and values of all config items.

    With one argument of the form section.name, print just the value
    of that config item.

    With multiple arguments, print names and values of all config
    items with matching section names or section.names.

    With --edit, start an editor on the user-level config file. With
    --global, edit the system-wide config file. With --local, edit the
    repository-level config file.

    With --debug, the source (filename and line number) is printed
    for each config item.

    See :hg:`help config` for more information about config files.

    .. container:: verbose

      Template:

      The following keywords are supported. See also :hg:`help templates`.

      :name: String. Config name.
      :source: String. Filename and line number where the item is defined.
      :value: String. Config value.

    Returns 0 on success, 1 if NAME does not exist.

    """

    opts = pycompat.byteskwargs(opts)
    if opts.get('edit') or opts.get('local') or opts.get('global'):
        # Edit mode: pick the config file to open in the user's editor.
        if opts.get('local') and opts.get('global'):
            raise error.Abort(_("can't use --local and --global together"))

        if opts.get('local'):
            if not repo:
                raise error.Abort(_("can't use --local outside a repository"))
            paths = [repo.vfs.join('hgrc')]
        elif opts.get('global'):
            paths = rcutil.systemrcpath()
        else:
            paths = rcutil.userrcpath()

        # Use the first existing candidate; otherwise seed the first
        # candidate path with a sample hgrc (for/else: no break occurred).
        for f in paths:
            if os.path.exists(f):
                break
        else:
            if opts.get('global'):
                samplehgrc = uimod.samplehgrcs['global']
            elif opts.get('local'):
                samplehgrc = uimod.samplehgrcs['local']
            else:
                samplehgrc = uimod.samplehgrcs['user']

            f = paths[0]
            fp = open(f, "wb")
            fp.write(util.tonativeeol(samplehgrc))
            fp.close()

        editor = ui.geteditor()
        ui.system("%s \"%s\"" % (editor, f),
                  onerr=error.Abort, errprefix=_("edit failed"),
                  blockedtag='config_edit')
        return
    ui.pager('config')
    fm = ui.formatter('config', opts)
    # With --debug, report where each config component was read from.
    for t, f in rcutil.rccomponents():
        if t == 'path':
            ui.debug('read config from: %s\n' % f)
        elif t == 'items':
            for section, name, value, source in f:
                ui.debug('set config by: %s\n' % source)
        else:
            raise error.ProgrammingError('unknown rctype: %s' % t)
    untrusted = bool(opts.get('untrusted'))

    # Arguments without a '.' select whole sections; with a '.', single
    # entries. A single entry selection prints the bare value.
    selsections = selentries = []
    if values:
        selsections = [v for v in values if '.' not in v]
        selentries = [v for v in values if '.' in v]
    uniquesel = (len(selentries) == 1 and not selsections)
    selsections = set(selsections)
    selentries = set(selentries)

    matched = False
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        source = ui.configsource(section, name, untrusted)
        value = pycompat.bytestr(value)
        if fm.isplain():
            # Plain output: never print an empty source, and keep each
            # item on one line by escaping newlines.
            source = source or 'none'
            value = value.replace('\n', '\\n')
        entryname = section + '.' + name
        if values and not (section in selsections or entryname in selentries):
            continue
        fm.startitem()
        fm.condwrite(ui.debugflag, 'source', '%s: ', source)
        if uniquesel:
            fm.data(name=entryname)
            fm.write('value', '%s\n', value)
        else:
            fm.write('name value', '%s=%s\n', entryname, value)
        matched = True
    fm.end()
    if matched:
        return 0
    return 1
1823 1822
@command('copy|cp',
    [('A', 'after', None, _('record a copy that has already occurred')),
    ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [SOURCE]... DEST'))
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect with the next commit. To undo a copy
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    opts = pycompat.byteskwargs(opts)
    # Only the working directory is touched, so the wlock suffices;
    # the real work is delegated to cmdutil.copy().
    with repo.wlock(False):
        return cmdutil.copy(ui, repo, pats, opts)
1848 1847
@command('debugcommands', [], _('[COMMAND]'), norepo=True)
def debugcommands(ui, cmd='', *args):
    """list all available commands and options"""
    # One line per command: "<name>: <longopt>, <longopt>, ...".
    for name, entry in sorted(table.iteritems()):
        # Drop aliases ("a|b" -> "a") and the '^' priority marker.
        name = name.split('|')[0].strip('^')
        longopts = ', '.join(opt[1] for opt in entry[1])
        ui.write('%s: %s\n' % (name, longopts))
1856 1855
@command('debugcomplete',
    [('o', 'options', None, _('show the command options'))],
    _('[-o] CMD'),
    norepo=True)
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get(r'options'):
        # -o: list option flags (global ones, plus CMD's if given)
        # instead of command names.
        opttables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            opttables.append(entry[1])
        flags = []
        for opttable in opttables:
            for opt in opttable:
                if "(DEPRECATED)" in opt[3]:
                    continue
                if opt[0]:
                    flags.append('-%s' % opt[0])
                flags.append('--%s' % opt[1])
        ui.write("%s\n" % "\n".join(flags))
        return

    cmdlist, unused_allcmds = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        # Verbose mode lists every alias of each matching command.
        cmdlist = [' '.join(aliases[0]) for aliases in cmdlist.values()]
    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
1884 1883
@command('^diff',
    [('r', 'rev', [], _('revision'), _('REV')),
    ('c', 'change', '', _('change made by revision'), _('REV'))
    ] + diffopts + diffopts2 + walkopts + subrepoopts,
    _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'),
    inferrepo=True,
    intents={INTENT_READONLY})
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    .. note::

       :hg:`diff` may generate unexpected results for merges, as it will
       default to comparing against the working directory's first
       parent changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its first parent.

    Alternatively you can specify -c/--change with a revision to see
    the changes in that changeset relative to its first parent.

    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read :hg:`help diffs`.

    .. container:: verbose

      Examples:

      - compare a file in the current working directory to its parent::

          hg diff foo.c

      - compare two historical versions of a directory, with rename info::

          hg diff --git -r 1.0:1.2 lib/

      - get change stats relative to the last change on some date::

          hg diff --stat -r "date('may 2')"

      - diff all newly-added files that contain a keyword::

          hg diff "set:added() and grep(GNU)"

      - compare a revision and its parents::

          hg diff -c 9353        # compare against first parent
          hg diff -r 9353^:9353  # same using revset syntax
          hg diff -r 9353^2:9353 # compare against the second parent

    Returns 0 on success.
    """

    opts = pycompat.byteskwargs(opts)
    revs = opts.get('rev')
    change = opts.get('change')
    stat = opts.get('stat')
    reverse = opts.get('reverse')

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise error.Abort(msg)
    elif change:
        # -c REV: diff REV against its first parent.
        repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn')
        ctx2 = scmutil.revsingle(repo, change, None)
        ctx1 = ctx2.p1()
    else:
        # -r handling: revpair resolves zero, one or two revisions to a
        # (base, target) context pair.
        repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn')
        ctx1, ctx2 = scmutil.revpair(repo, revs)
    node1, node2 = ctx1.node(), ctx2.node()

    if reverse:
        node1, node2 = node2, node1

    diffopts = patch.diffallopts(ui, opts)
    m = scmutil.match(ctx2, pats, opts)
    # narrowmatch(m) intersects the matcher with the repository's narrow
    # spec so the diff stays within the narrowed file set.
    m = repo.narrowmatch(m)
    ui.pager('diff')
    logcmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
                              listsubrepos=opts.get('subrepos'),
                              root=opts.get('root'))
1978 1977
@command('^export',
    [('B', 'bookmark', '',
     _('export changes only reachable by given bookmark')),
    ('o', 'output', '',
     _('print output to file with formatted name'), _('FORMAT')),
    ('', 'switch-parent', None, _('diff against the second parent')),
    ('r', 'rev', [], _('revisions to export'), _('REV')),
    ] + diffopts + formatteropts,
    _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'),
    intents={INTENT_READONLY})
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.
    If no revision is given, the parent of the working directory is used.

    The information shown in the changeset header is: author, date,
    branch name (if non-default), changeset hash, parent(s) and commit
    comment.

    .. note::

       :hg:`export` may generate unexpected diff output for merge
       changesets, as it will compare the merge changeset against its
       first parent only.

    Output may be to a file, in which case the name of the file is
    given using a template string. See :hg:`help templates`. In addition
    to the common template keywords, the following formatting rules are
    supported:

    :``%%``: literal "%" character
    :``%H``: changeset hash (40 hexadecimal digits)
    :``%N``: number of patches being generated
    :``%R``: changeset revision number
    :``%b``: basename of the exporting repository
    :``%h``: short-form changeset hash (12 hexadecimal digits)
    :``%m``: first line of the commit message (only alphanumeric characters)
    :``%n``: zero-padded sequence number, starting at 1
    :``%r``: zero-padded changeset revision number
    :``\\``: literal "\\" character

    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. With -a, export will generate a
    diff anyway, probably with undesirable results.

    With -B/--bookmark changesets reachable by the given bookmark are
    selected.

    Use the -g/--git option to generate diffs in the git extended diff
    format. See :hg:`help diffs` for more information.

    With the --switch-parent option, the diff will be against the
    second parent. It can be useful to review a merge.

    .. container:: verbose

      Template:

      The following keywords are supported in addition to the common template
      keywords and functions. See also :hg:`help templates`.

      :diff: String. Diff content.
      :parents: List of strings. Parent nodes of the changeset.

      Examples:

      - use export and import to transplant a bugfix to the current
        branch::

          hg export -r 9353 | hg import -

      - export all the changesets between two revisions to a file with
        rename information::

          hg export --git -r 123:150 > changes.txt

      - split outgoing changes into a series of patches with
        descriptive names::

          hg export -r "outgoing()" -o "%n-%m.patch"

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    bookmark = opts.get('bookmark')
    # Positional arguments and -r values are merged into one revision list.
    changesets += tuple(opts.get('rev', []))

    # -B selects revisions by itself; mixing it with explicit revisions
    # would be ambiguous.
    if bookmark and changesets:
        raise error.Abort(_("-r and -B are mutually exclusive"))

    if bookmark:
        if bookmark not in repo._bookmarks:
            raise error.Abort(_("bookmark '%s' not found") % bookmark)
        revs = scmutil.bookmarkrevs(repo, bookmark)
    else:
        # Default to the working directory parent when nothing was named.
        if not changesets:
            changesets = ['.']
        repo = scmutil.unhidehashlikerevs(repo, changesets, 'nowarn')
        revs = scmutil.revrange(repo, changesets)

    if not revs:
        raise error.Abort(_("export requires at least one changeset"))
    # Singular/plural distinction is deliberate in the progress note.
    ui.note(_('exporting patches:\n') if len(revs) > 1
            else _('exporting patch:\n'))

    fntemplate = opts.get('output')
    # '-' (stdout) is treated the same as no template at all.
    if cmdutil.isstdiofilename(fntemplate):
        fntemplate = ''

    if fntemplate:
        # Output goes to template-named files, so suppress formatter output.
        fm = formatter.nullformatter(ui, 'export', opts)
    else:
        ui.pager('export')
        fm = ui.formatter('export', opts)
    with fm:
        cmdutil.export(repo, revs, fm, fntemplate=fntemplate,
                       switch_parent=opts.get('switch_parent'),
                       opts=patch.diffallopts(ui, opts))
2102 2101
@command('files',
    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ] + walkopts + formatteropts + subrepoopts,
    _('[OPTION]... [FILE]...'),
    intents={INTENT_READONLY})
def files(ui, repo, *pats, **opts):
    """list tracked files

    Print files under Mercurial control in the working directory or
    specified revision for given files (excluding removed files).
    Files can be specified as filenames or filesets.

    If no files are given to match, this command prints the names
    of all files under Mercurial control.

    .. container:: verbose

      Template:

      The following keywords are supported in addition to the common template
      keywords and functions. See also :hg:`help templates`.

      :flags: String. Character denoting file's symlink and executable bits.
      :path: String. Repository-absolute path of the file.
      :size: Integer. Size of the file in bytes.

      Examples:

      - list all files under the current directory::

          hg files .

      - shows sizes and flags for current revision::

          hg files -vr .

      - list all files named README::

          hg files -I "**/README"

      - list all binary files::

          hg files "set:binary()"

      - find files containing a regular expression::

          hg files "set:grep('bob')"

      - search tracked file contents with xargs and grep::

          hg files -0 | xargs -0 grep foo

    See :hg:`help patterns` and :hg:`help filesets` for more information
    on specifying file patterns.

    Returns 0 if a match is found, 1 otherwise.

    """

    opts = pycompat.byteskwargs(opts)
    rev = opts.get('rev')
    if rev:
        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
    # No rev means the working directory (revsingle handles rev=None).
    ctx = scmutil.revsingle(repo, rev, None)

    # With -0 each name is NUL-terminated for consumption by xargs -0;
    # otherwise one name per line.
    sep = '\0' if opts.get('print0') else '\n'
    fmt = '%s' + sep

    matcher = scmutil.match(ctx, pats, opts)
    ui.pager('files')
    with ui.formatter('files', opts) as fm:
        return cmdutil.files(ui, ctx, matcher, fm, fmt, opts.get('subrepos'))
2178 2177
@command(
    '^forget',
    [('i', 'interactive', None, _('use interactive mode')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... FILE...'), inferrepo=True)
def forget(ui, repo, *pats, **opts):
    """forget the specified files on the next commit

    Mark the specified files so they will no longer be tracked
    after the next commit.

    This only removes files from the current branch, not from the
    entire project history, and it does not delete them from the
    working directory.

    To delete the file from the working directory, see :hg:`remove`.

    To undo a forget before the next commit, see :hg:`add`.

    .. container:: verbose

      Examples:

      - forget newly-added binary files::

          hg forget "set:added() and binary()"

      - forget files that would be excluded by .hgignore::

          hg forget "set:hgignore()"

    Returns 0 on success.
    """

    opts = pycompat.byteskwargs(opts)
    # A bare 'hg forget' would silently do nothing; require an explicit
    # file list or pattern.
    if not pats:
        raise error.Abort(_('no files specified'))

    m = scmutil.match(repo[None], pats, opts)
    dryrun, interactive = opts.get('dry_run'), opts.get('interactive')
    # cmdutil.forget() returns a tuple whose first element is the list of
    # files it refused to forget; only that list affects the exit code.
    rejected = cmdutil.forget(ui, repo, m, prefix="",
                              explicitonly=False, dryrun=dryrun,
                              interactive=interactive)[0]
    # Exit 1 if anything was rejected, 0 otherwise (replaces the legacy
    # 'rejected and 1 or 0' idiom with the clearer conditional expression).
    return 1 if rejected else 0
2223 2222
@command(
    'graft',
    [('r', 'rev', [], _('revisions to graft'), _('REV')),
     ('c', 'continue', False, _('resume interrupted graft')),
     ('', 'stop', False, _('stop interrupted graft')),
     ('', 'abort', False, _('abort interrupted graft')),
     ('e', 'edit', False, _('invoke editor on commit messages')),
     ('', 'log', None, _('append graft info to log message')),
     ('', 'no-commit', None,
      _("don't commit, just apply the changes in working directory")),
     ('f', 'force', False, _('force graft')),
     ('D', 'currentdate', False,
      _('record the current date as commit date')),
     # Fix: dropped a stray _('DATE') metavar that had been attached to
     # this boolean flag (copy-paste from a value-taking option); boolean
     # flags take no value, so the metavar was meaningless.
     ('U', 'currentuser', False,
      _('record the current user as committer'))]
    + commitopts2 + mergetoolopts + dryrunopts,
    _('[OPTION]... [-r REV]... REV...'))
def graft(ui, repo, *revs, **opts):
    '''copy changes from other branches onto the current branch

    This command uses Mercurial's merge logic to copy individual
    changes from other branches without merging branches in the
    history graph. This is sometimes known as 'backporting' or
    'cherry-picking'. By default, graft will copy user, date, and
    description from the source changesets.

    Changesets that are ancestors of the current revision, that have
    already been grafted, or that are merges will be skipped.

    If --log is specified, log messages will have a comment appended
    of the form::

      (grafted from CHANGESETHASH)

    If --force is specified, revisions will be grafted even if they
    are already ancestors of, or have been grafted to, the destination.
    This is useful when the revisions have since been backed out.

    If a graft merge results in conflicts, the graft process is
    interrupted so that the current merge can be manually resolved.
    Once all conflicts are addressed, the graft process can be
    continued with the -c/--continue option.

    The -c/--continue option reapplies all the earlier options.

    .. container:: verbose

      Examples:

      - copy a single change to the stable branch and edit its description::

          hg update stable
          hg graft --edit 9393

      - graft a range of changesets with one exception, updating dates::

          hg graft -D "2085::2093 and not 2091"

      - continue a graft after resolving conflicts::

          hg graft -c

      - show the source of a grafted changeset::

          hg log --debug -r .

      - show revisions sorted by date::

          hg log -r "sort(all(), date)"

    See :hg:`help revisions` for more about specifying revisions.

    Returns 0 on successful completion.
    '''
    # All the real work (including --continue/--stop/--abort handling)
    # lives in _dograft(); hold the wlock for the whole operation so it
    # is serialized against other working-copy mutations.
    with repo.wlock():
        return _dograft(ui, repo, *revs, **opts)
2300 2299
def _dograft(ui, repo, *revs, **opts):
    """Implement :hg:`graft` (called with the wlock already held).

    Dispatches --stop/--abort/--continue to their helpers, otherwise
    filters the requested revisions (merges, ancestors, duplicate
    grafts) and grafts each remaining one in turn, persisting enough
    state in 'graftstate' to support --continue after a conflict.

    Returns 0 on success, -1 when every requested revision was skipped.
    Raises error.Abort on invalid flag combinations or unresolved
    merge conflicts.
    """
    opts = pycompat.byteskwargs(opts)
    if revs and opts.get('rev'):
        ui.warn(_('warning: inconsistent use of --rev might give unexpected '
                  'revision ordering!\n'))

    # Positional revisions and -r values are processed as one list.
    revs = list(revs)
    revs.extend(opts.get('rev'))
    # a dict of data to be stored in state file
    statedata = {}
    # list of new nodes created by ongoing graft
    statedata['newnodes'] = []

    # -U/-D only fill in user/date when not given explicitly.
    if not opts.get('user') and opts.get('currentuser'):
        opts['user'] = ui.username()
    if not opts.get('date') and opts.get('currentdate'):
        opts['date'] = "%d %d" % dateutil.makedate()

    editor = cmdutil.getcommiteditor(editform='graft',
                                     **pycompat.strkwargs(opts))

    cont = False
    # --no-commit conflicts with every flag that only affects the commit.
    if opts.get('no_commit'):
        if opts.get('edit'):
            raise error.Abort(_("cannot specify --no-commit and "
                                "--edit together"))
        if opts.get('currentuser'):
            raise error.Abort(_("cannot specify --no-commit and "
                                "--currentuser together"))
        if opts.get('currentdate'):
            raise error.Abort(_("cannot specify --no-commit and "
                                "--currentdate together"))
        if opts.get('log'):
            raise error.Abort(_("cannot specify --no-commit and "
                                "--log together"))

    graftstate = statemod.cmdstate(repo, 'graftstate')

    if opts.get('stop'):
        # --stop stands alone: reject any other graft flag.
        if opts.get('continue'):
            raise error.Abort(_("cannot use '--continue' and "
                                "'--stop' together"))
        if opts.get('abort'):
            raise error.Abort(_("cannot use '--abort' and '--stop' together"))

        if any((opts.get('edit'), opts.get('log'), opts.get('user'),
                opts.get('date'), opts.get('currentdate'),
                opts.get('currentuser'), opts.get('rev'))):
            raise error.Abort(_("cannot specify any other flag with '--stop'"))
        return _stopgraft(ui, repo, graftstate)
    elif opts.get('abort'):
        # Likewise, --abort stands alone.
        if opts.get('continue'):
            raise error.Abort(_("cannot use '--continue' and "
                                "'--abort' together"))
        if any((opts.get('edit'), opts.get('log'), opts.get('user'),
                opts.get('date'), opts.get('currentdate'),
                opts.get('currentuser'), opts.get('rev'))):
            raise error.Abort(_("cannot specify any other flag with '--abort'"))

        return _abortgraft(ui, repo, graftstate)
    elif opts.get('continue'):
        cont = True
        if revs:
            raise error.Abort(_("can't specify --continue and revisions"))
        # read in unfinished revisions
        if graftstate.exists():
            # Re-apply the options recorded when the graft was interrupted
            # ("-c reapplies all the earlier options").
            statedata = _readgraftstate(repo, graftstate)
            if statedata.get('date'):
                opts['date'] = statedata['date']
            if statedata.get('user'):
                opts['user'] = statedata['user']
            if statedata.get('log'):
                opts['log'] = True
            if statedata.get('no_commit'):
                opts['no_commit'] = statedata.get('no_commit')
            nodes = statedata['nodes']
            revs = [repo[node].rev() for node in nodes]
        else:
            cmdutil.wrongtooltocontinue(repo, _('graft'))
    else:
        # Normal (fresh) graft: need revisions and a clean working copy.
        if not revs:
            raise error.Abort(_('no revisions specified'))
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)

    skipped = set()
    # check for merges
    for rev in repo.revs('%ld and merge()', revs):
        ui.warn(_('skipping ungraftable merge revision %d\n') % rev)
        skipped.add(rev)
    revs = [r for r in revs if r not in skipped]
    if not revs:
        return -1

    # Don't check in the --continue case, in effect retaining --force across
    # --continues. That's because without --force, any revisions we decided to
    # skip would have been filtered out here, so they wouldn't have made their
    # way to the graftstate. With --force, any revisions we would have otherwise
    # skipped would not have been filtered out, and if they hadn't been applied
    # already, they'd have been in the graftstate.
    if not (cont or opts.get('force')):
        # check for ancestors of dest branch
        crev = repo['.'].rev()
        ancestors = repo.changelog.ancestors([crev], inclusive=True)
        # XXX make this lazy in the future
        # don't mutate while iterating, create a copy
        for rev in list(revs):
            if rev in ancestors:
                ui.warn(_('skipping ancestor revision %d:%s\n') %
                        (rev, repo[rev]))
                # XXX remove on list is slow
                revs.remove(rev)
        if not revs:
            return -1

        # analyze revs for earlier grafts
        # ids maps both each rev's own hash and its recorded graft source
        # (extra['source']) to the rev, so either identity is recognized.
        ids = {}
        for ctx in repo.set("%ld", revs):
            ids[ctx.hex()] = ctx.rev()
            n = ctx.extra().get('source')
            if n:
                ids[n] = ctx.rev()

        # check ancestors for earlier grafts
        ui.debug('scanning for duplicate grafts\n')

        # The only changesets we can be sure doesn't contain grafts of any
        # revs, are the ones that are common ancestors of *all* revs:
        for rev in repo.revs('only(%d,ancestor(%ld))', crev, revs):
            ctx = repo[rev]
            n = ctx.extra().get('source')
            if n in ids:
                try:
                    r = repo[n].rev()
                except error.RepoLookupError:
                    # source changeset isn't in this repo (e.g. grafted
                    # from another repository)
                    r = None
                if r in revs:
                    ui.warn(_('skipping revision %d:%s '
                              '(already grafted to %d:%s)\n')
                            % (r, repo[r], rev, ctx))
                    revs.remove(r)
                elif ids[n] in revs:
                    if r is None:
                        ui.warn(_('skipping already grafted revision %d:%s '
                                  '(%d:%s also has unknown origin %s)\n')
                                % (ids[n], repo[ids[n]], rev, ctx, n[:12]))
                    else:
                        ui.warn(_('skipping already grafted revision %d:%s '
                                  '(%d:%s also has origin %d:%s)\n')
                                % (ids[n], repo[ids[n]], rev, ctx, r, n[:12]))
                    revs.remove(ids[n])
            elif ctx.hex() in ids:
                r = ids[ctx.hex()]
                ui.warn(_('skipping already grafted revision %d:%s '
                          '(was grafted from %d:%s)\n') %
                        (r, repo[r], rev, ctx))
                revs.remove(r)
        if not revs:
            return -1

    if opts.get('no_commit'):
        statedata['no_commit'] = True
    # Graft each surviving revision in order; 'pos' lets us record the
    # not-yet-applied tail in the state file if a conflict interrupts us.
    for pos, ctx in enumerate(repo.set("%ld", revs)):
        desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                               ctx.description().split('\n', 1)[0])
        names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
        if names:
            desc += ' (%s)' % ' '.join(names)
        ui.status(_('grafting %s\n') % desc)
        if opts.get('dry_run'):
            continue

        # Record provenance: keep the original source through chained
        # grafts, noting this changeset as an intermediate step.
        source = ctx.extra().get('source')
        extra = {}
        if source:
            extra['source'] = source
            extra['intermediate-source'] = ctx.hex()
        else:
            extra['source'] = ctx.hex()
        user = ctx.user()
        if opts.get('user'):
            user = opts['user']
            statedata['user'] = user
        date = ctx.date()
        if opts.get('date'):
            date = opts['date']
            statedata['date'] = date
        message = ctx.description()
        if opts.get('log'):
            message += '\n(grafted from %s)' % ctx.hex()
            statedata['log'] = True

        # we don't merge the first commit when continuing
        if not cont:
            # perform the graft merge with p1(rev) as 'ancestor'
            overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
            with ui.configoverride(overrides, 'graft'):
                stats = mergemod.graft(repo, ctx, ctx.p1(), ['local', 'graft'])
            # report any conflicts
            if stats.unresolvedcount > 0:
                # write out state for --continue
                nodes = [repo[rev].hex() for rev in revs[pos:]]
                statedata['nodes'] = nodes
                stateversion = 1
                graftstate.save(stateversion, statedata)
                hint = _("use 'hg resolve' and 'hg graft --continue'")
                raise error.Abort(
                    _("unresolved conflicts, can't continue"),
                    hint=hint)
        else:
            cont = False

        # commit if --no-commit is false
        if not opts.get('no_commit'):
            node = repo.commit(text=message, user=user, date=date, extra=extra,
                               editor=editor)
            if node is None:
                ui.warn(
                    _('note: graft of %d:%s created no changes to commit\n') %
                    (ctx.rev(), ctx))
            # checking that newnodes exist because old state files won't have it
            elif statedata.get('newnodes') is not None:
                statedata['newnodes'].append(node)

    # remove state when we complete successfully
    if not opts.get('dry_run'):
        graftstate.delete()

    return 0
2531 2530
def _abortgraft(ui, repo, graftstate):
    """Abort an interrupted graft and roll back to the pre-graft state.

    Updates the working directory back to where the graft started and,
    when it is safe to do so, strips the changesets the graft created.
    Aborts with an error if no graft is in progress or if the state file
    is too old to carry the information needed to abort.

    Returns 0 on success.
    """
    if not graftstate.exists():
        raise error.Abort(_("no interrupted graft to abort"))
    statedata = _readgraftstate(repo, graftstate)
    newnodes = statedata.get('newnodes')
    if newnodes is None:
        # an old graft state which does not have all the data required to
        # abort the graft
        raise error.Abort(_("cannot abort using an old graftstate"))

    # changeset from which graft operation was started
    # (fix: dropped a dead 'startctx = None' initialization that was
    # unconditionally overwritten; use truthiness instead of len() > 0)
    if newnodes:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo['.']
    # whether to strip or not
    cleanup = False
    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # checking that none of the newnodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(_("cannot clean up public changesets %s\n")
                         % ', '.join(bytes(repo[r]) for r in immutable),
                         hint=_("see 'hg help phases' for details"))
            cleanup = False

        # checking that no new nodes are created on top of grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(_("new changesets detected on destination "
                           "branch, can't strip\n"))
            cleanup = False

    if cleanup:
        with repo.wlock(), repo.lock():
            hg.updaterepo(repo, startctx.node(), overwrite=True)
            # stripping the new nodes created
            strippoints = [c.node() for c in repo.set("roots(%ld)",
                                                      newnodes)]
            repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo['.']
        hg.updaterepo(repo, startctx.node(), overwrite=True)

    ui.status(_("graft aborted\n"))
    ui.status(_("working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
2587 2586
def _readgraftstate(repo, graftstate):
    """Return the saved graft state as a dict.

    Falls back to the legacy on-disk format (a bare list of node hashes,
    one per line) when the state file cannot be parsed as the modern
    versioned format.
    """
    try:
        return graftstate.read()
    except error.CorruptedState:
        # Pre-statefile graftstate: plain newline-separated node hashes;
        # wrap them in the modern dict shape.
        content = repo.vfs.read('graftstate')
        return {'nodes': content.splitlines()}
2595 2594
def _stopgraft(ui, repo, graftstate):
    """Discard an interrupted graft, staying at the current parent.

    Unlike abort, this keeps any changesets already grafted; it only
    throws away the half-applied working-directory changes and the
    state file. Aborts with an error if no graft is in progress.

    Returns 0.
    """
    if not graftstate.exists():
        raise error.Abort(_("no interrupted graft found"))
    parentctx = repo['.']
    # Blow away the partially-merged working copy changes.
    hg.updaterepo(repo, parentctx.node(), overwrite=True)
    graftstate.delete()
    ui.status(_("stopped the interrupted graft\n"))
    ui.status(_("working directory is now at %s\n") % parentctx.hex()[:12])
    return 0
2606 2605
2607 2606 @command('grep',
2608 2607 [('0', 'print0', None, _('end fields with NUL')),
2609 2608 ('', 'all', None, _('print all revisions that match (DEPRECATED) ')),
2610 2609 ('', 'diff', None, _('print all revisions when the term was introduced '
2611 2610 'or removed')),
2612 2611 ('a', 'text', None, _('treat all files as text')),
2613 2612 ('f', 'follow', None,
2614 2613 _('follow changeset history,'
2615 2614 ' or file history across copies and renames')),
2616 2615 ('i', 'ignore-case', None, _('ignore case when matching')),
2617 2616 ('l', 'files-with-matches', None,
2618 2617 _('print only filenames and revisions that match')),
2619 2618 ('n', 'line-number', None, _('print matching line numbers')),
2620 2619 ('r', 'rev', [],
2621 2620 _('only search files changed within revision range'), _('REV')),
2622 2621 ('', 'all-files', None,
2623 2622 _('include all files in the changeset while grepping (EXPERIMENTAL)')),
2624 2623 ('u', 'user', None, _('list the author (long with -v)')),
2625 2624 ('d', 'date', None, _('list the date (short with -q)')),
2626 2625 ] + formatteropts + walkopts,
2627 2626 _('[OPTION]... PATTERN [FILE]...'),
2628 2627 inferrepo=True,
2629 2628 intents={INTENT_READONLY})
2630 2629 def grep(ui, repo, pattern, *pats, **opts):
2631 2630 """search revision history for a pattern in specified files
2632 2631
2633 2632 Search revision history for a regular expression in the specified
2634 2633 files or the entire project.
2635 2634
2636 2635 By default, grep prints the most recent revision number for each
2637 2636 file in which it finds a match. To get it to print every revision
2638 2637 that contains a change in match status ("-" for a match that becomes
2639 2638 a non-match, or "+" for a non-match that becomes a match), use the
2640 2639 --diff flag.
2641 2640
2642 2641 PATTERN can be any Python (roughly Perl-compatible) regular
2643 2642 expression.
2644 2643
2645 2644 If no FILEs are specified (and -f/--follow isn't set), all files in
2646 2645 the repository are searched, including those that don't exist in the
2647 2646 current branch or have been deleted in a prior changeset.
2648 2647
2649 2648 Returns 0 if a match is found, 1 otherwise.
2650 2649 """
2651 2650 opts = pycompat.byteskwargs(opts)
2652 2651 diff = opts.get('all') or opts.get('diff')
2653 2652 all_files = opts.get('all_files')
2654 2653 if diff and opts.get('all_files'):
2655 2654 raise error.Abort(_('--diff and --all-files are mutually exclusive'))
2656 2655 # TODO: remove "not opts.get('rev')" if --all-files -rMULTIREV gets working
2657 2656 if opts.get('all_files') is None and not opts.get('rev') and not diff:
2658 2657 # experimental config: commands.grep.all-files
2659 2658 opts['all_files'] = ui.configbool('commands', 'grep.all-files')
2660 2659 plaingrep = opts.get('all_files') and not opts.get('rev')
2661 2660 if plaingrep:
2662 2661 opts['rev'] = ['wdir()']
2663 2662
2664 2663 reflags = re.M
2665 2664 if opts.get('ignore_case'):
2666 2665 reflags |= re.I
2667 2666 try:
2668 2667 regexp = util.re.compile(pattern, reflags)
2669 2668 except re.error as inst:
2670 2669 ui.warn(_("grep: invalid match pattern: %s\n") % pycompat.bytestr(inst))
2671 2670 return 1
2672 2671 sep, eol = ':', '\n'
2673 2672 if opts.get('print0'):
2674 2673 sep = eol = '\0'
2675 2674
2676 2675 getfile = util.lrucachefunc(repo.file)
2677 2676
2678 2677 def matchlines(body):
2679 2678 begin = 0
2680 2679 linenum = 0
2681 2680 while begin < len(body):
2682 2681 match = regexp.search(body, begin)
2683 2682 if not match:
2684 2683 break
2685 2684 mstart, mend = match.span()
2686 2685 linenum += body.count('\n', begin, mstart) + 1
2687 2686 lstart = body.rfind('\n', begin, mstart) + 1 or begin
2688 2687 begin = body.find('\n', mend) + 1 or len(body) + 1
2689 2688 lend = begin - 1
2690 2689 yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
2691 2690
2692 2691 class linestate(object):
2693 2692 def __init__(self, line, linenum, colstart, colend):
2694 2693 self.line = line
2695 2694 self.linenum = linenum
2696 2695 self.colstart = colstart
2697 2696 self.colend = colend
2698 2697
2699 2698 def __hash__(self):
2700 2699 return hash((self.linenum, self.line))
2701 2700
2702 2701 def __eq__(self, other):
2703 2702 return self.line == other.line
2704 2703
2705 2704 def findpos(self):
2706 2705 """Iterate all (start, end) indices of matches"""
2707 2706 yield self.colstart, self.colend
2708 2707 p = self.colend
2709 2708 while p < len(self.line):
2710 2709 m = regexp.search(self.line, p)
2711 2710 if not m:
2712 2711 break
2713 2712 yield m.span()
2714 2713 p = m.end()
2715 2714
2716 2715 matches = {}
2717 2716 copies = {}
2718 2717 def grepbody(fn, rev, body):
2719 2718 matches[rev].setdefault(fn, [])
2720 2719 m = matches[rev][fn]
2721 2720 for lnum, cstart, cend, line in matchlines(body):
2722 2721 s = linestate(line, lnum, cstart, cend)
2723 2722 m.append(s)
2724 2723
2725 2724 def difflinestates(a, b):
2726 2725 sm = difflib.SequenceMatcher(None, a, b)
2727 2726 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2728 2727 if tag == r'insert':
2729 2728 for i in pycompat.xrange(blo, bhi):
2730 2729 yield ('+', b[i])
2731 2730 elif tag == r'delete':
2732 2731 for i in pycompat.xrange(alo, ahi):
2733 2732 yield ('-', a[i])
2734 2733 elif tag == r'replace':
2735 2734 for i in pycompat.xrange(alo, ahi):
2736 2735 yield ('-', a[i])
2737 2736 for i in pycompat.xrange(blo, bhi):
2738 2737 yield ('+', b[i])
2739 2738
2740 2739 def display(fm, fn, ctx, pstates, states):
2741 2740 rev = scmutil.intrev(ctx)
2742 2741 if fm.isplain():
2743 2742 formatuser = ui.shortuser
2744 2743 else:
2745 2744 formatuser = pycompat.bytestr
2746 2745 if ui.quiet:
2747 2746 datefmt = '%Y-%m-%d'
2748 2747 else:
2749 2748 datefmt = '%a %b %d %H:%M:%S %Y %1%2'
2750 2749 found = False
2751 2750 @util.cachefunc
2752 2751 def binary():
2753 2752 flog = getfile(fn)
2754 2753 try:
2755 2754 return stringutil.binary(flog.read(ctx.filenode(fn)))
2756 2755 except error.WdirUnsupported:
2757 2756 return ctx[fn].isbinary()
2758 2757
2759 2758 fieldnamemap = {'filename': 'path', 'linenumber': 'lineno'}
2760 2759 if diff:
2761 2760 iter = difflinestates(pstates, states)
2762 2761 else:
2763 2762 iter = [('', l) for l in states]
2764 2763 for change, l in iter:
2765 2764 fm.startitem()
2766 2765 fm.context(ctx=ctx)
2767 2766 fm.data(node=fm.hexfunc(scmutil.binnode(ctx)))
2768 2767
2769 2768 cols = [
2770 2769 ('filename', '%s', fn, True),
2771 2770 ('rev', '%d', rev, not plaingrep),
2772 2771 ('linenumber', '%d', l.linenum, opts.get('line_number')),
2773 2772 ]
2774 2773 if diff:
2775 2774 cols.append(('change', '%s', change, True))
2776 2775 cols.extend([
2777 2776 ('user', '%s', formatuser(ctx.user()), opts.get('user')),
2778 2777 ('date', '%s', fm.formatdate(ctx.date(), datefmt),
2779 2778 opts.get('date')),
2780 2779 ])
2781 2780 lastcol = next(
2782 2781 name for name, fmt, data, cond in reversed(cols) if cond)
2783 2782 for name, fmt, data, cond in cols:
2784 2783 field = fieldnamemap.get(name, name)
2785 2784 fm.condwrite(cond, field, fmt, data, label='grep.%s' % name)
2786 2785 if cond and name != lastcol:
2787 2786 fm.plain(sep, label='grep.sep')
2788 2787 if not opts.get('files_with_matches'):
2789 2788 fm.plain(sep, label='grep.sep')
2790 2789 if not opts.get('text') and binary():
2791 2790 fm.plain(_(" Binary file matches"))
2792 2791 else:
2793 2792 displaymatches(fm.nested('texts', tmpl='{text}'), l)
2794 2793 fm.plain(eol)
2795 2794 found = True
2796 2795 if opts.get('files_with_matches'):
2797 2796 break
2798 2797 return found
2799 2798
2800 2799 def displaymatches(fm, l):
2801 2800 p = 0
2802 2801 for s, e in l.findpos():
2803 2802 if p < s:
2804 2803 fm.startitem()
2805 2804 fm.write('text', '%s', l.line[p:s])
2806 2805 fm.data(matched=False)
2807 2806 fm.startitem()
2808 2807 fm.write('text', '%s', l.line[s:e], label='grep.match')
2809 2808 fm.data(matched=True)
2810 2809 p = e
2811 2810 if p < len(l.line):
2812 2811 fm.startitem()
2813 2812 fm.write('text', '%s', l.line[p:])
2814 2813 fm.data(matched=False)
2815 2814 fm.end()
2816 2815
2817 2816 skip = {}
2818 2817 revfiles = {}
2819 2818 match = scmutil.match(repo[None], pats, opts)
2820 2819 found = False
2821 2820 follow = opts.get('follow')
2822 2821
2823 2822 def prep(ctx, fns):
2824 2823 rev = ctx.rev()
2825 2824 pctx = ctx.p1()
2826 2825 parent = pctx.rev()
2827 2826 matches.setdefault(rev, {})
2828 2827 matches.setdefault(parent, {})
2829 2828 files = revfiles.setdefault(rev, [])
2830 2829 for fn in fns:
2831 2830 flog = getfile(fn)
2832 2831 try:
2833 2832 fnode = ctx.filenode(fn)
2834 2833 except error.LookupError:
2835 2834 continue
2836 2835 try:
2837 2836 copied = flog.renamed(fnode)
2838 2837 except error.WdirUnsupported:
2839 2838 copied = ctx[fn].renamed()
2840 2839 copy = follow and copied and copied[0]
2841 2840 if copy:
2842 2841 copies.setdefault(rev, {})[fn] = copy
2843 2842 if fn in skip:
2844 2843 if copy:
2845 2844 skip[copy] = True
2846 2845 continue
2847 2846 files.append(fn)
2848 2847
2849 2848 if fn not in matches[rev]:
2850 2849 try:
2851 2850 content = flog.read(fnode)
2852 2851 except error.WdirUnsupported:
2853 2852 content = ctx[fn].data()
2854 2853 grepbody(fn, rev, content)
2855 2854
2856 2855 pfn = copy or fn
2857 2856 if pfn not in matches[parent]:
2858 2857 try:
2859 2858 fnode = pctx.filenode(pfn)
2860 2859 grepbody(pfn, parent, flog.read(fnode))
2861 2860 except error.LookupError:
2862 2861 pass
2863 2862
2864 2863 ui.pager('grep')
2865 2864 fm = ui.formatter('grep', opts)
2866 2865 for ctx in cmdutil.walkchangerevs(repo, match, opts, prep):
2867 2866 rev = ctx.rev()
2868 2867 parent = ctx.p1().rev()
2869 2868 for fn in sorted(revfiles.get(rev, [])):
2870 2869 states = matches[rev][fn]
2871 2870 copy = copies.get(rev, {}).get(fn)
2872 2871 if fn in skip:
2873 2872 if copy:
2874 2873 skip[copy] = True
2875 2874 continue
2876 2875 pstates = matches.get(parent, {}).get(copy or fn, [])
2877 2876 if pstates or states:
2878 2877 r = display(fm, fn, ctx, pstates, states)
2879 2878 found = found or r
2880 2879 if r and not diff and not all_files:
2881 2880 skip[fn] = True
2882 2881 if copy:
2883 2882 skip[copy] = True
2884 2883 del revfiles[rev]
2885 2884 # We will keep the matches dict for the duration of the window
2886 2885 # clear the matches dict once the window is over
2887 2886 if not revfiles:
2888 2887 matches.clear()
2889 2888 fm.end()
2890 2889
2891 2890 return not found
2892 2891
@command('heads',
    [('r', 'rev', '',
      _('show only heads which are descendants of STARTREV'), _('STARTREV')),
    ('t', 'topo', False, _('show topological heads only')),
    ('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
    ('c', 'closed', False, _('show normal and closed branch heads')),
    ] + templateopts,
    _('[-ct] [-r STARTREV] [REV]...'),
    intents={INTENT_READONLY})
def heads(ui, repo, *branchrevs, **opts):
    """show branch heads

    With no arguments, show all open branch heads in the repository.
    Branch heads are changesets that have no descendants on the
    same branch. They are where development generally takes place and
    are the usual targets for update and merge operations.

    If one or more REVs are given, only open branch heads on the
    branches associated with the specified changesets are shown. This
    means that you can use :hg:`heads .` to see the heads on the
    currently checked-out branch.

    If -c/--closed is specified, also show branch heads marked closed
    (see :hg:`commit --close-branch`).

    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.

    If -t/--topo is specified, named branch mechanics will be ignored and only
    topological heads (changesets with no children) will be shown.

    Returns 0 if matching heads are found, 1 if not.
    """

    # Normalize keyword-argument keys through byteskwargs before any
    # opts.get() lookups below.
    opts = pycompat.byteskwargs(opts)
    start = None
    rev = opts.get('rev')
    if rev:
        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
        # STARTREV: only heads descending from this node are considered.
        start = scmutil.revsingle(repo, rev, None).node()

    if opts.get('topo'):
        # --topo: raw DAG heads, ignoring named-branch mechanics.
        heads = [repo[h] for h in repo.heads(start)]
    else:
        # Collect per-branch heads (optionally including closed ones).
        heads = []
        for branch in repo.branchmap():
            heads += repo.branchheads(branch, start, opts.get('closed'))
        heads = [repo[h] for h in heads]

    if branchrevs:
        # Restrict to the branches of the given revisions.  NOTE:
        # 'branches' is intentionally reused by the warning block below,
        # which is likewise guarded by 'if branchrevs'.
        branches = set(repo[r].branch()
                       for r in scmutil.revrange(repo, branchrevs))
        heads = [h for h in heads if h.branch() in branches]

    if opts.get('active') and branchrevs:
        # --active (deprecated): keep only heads that are also DAG heads.
        dagheads = repo.heads(start)
        heads = [h for h in heads if h.node() in dagheads]

    if branchrevs:
        # Warn about requested branches that ended up with no open heads.
        haveheads = set(h.branch() for h in heads)
        if branches - haveheads:
            headless = ', '.join(b for b in branches - haveheads)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)

    if not heads:
        return 1

    ui.pager('heads')
    # Newest first.
    heads = sorted(heads, key=lambda x: -x.rev())
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    for ctx in heads:
        displayer.show(ctx)
    displayer.close()
@command('help',
    [('e', 'extension', None, _('show only help for extensions')),
    ('c', 'command', None, _('show only help for commands')),
    ('k', 'keyword', None, _('show topics matching keyword')),
    ('s', 'system', [], _('show help for specific platform(s)')),
    ],
    _('[-ecks] [TOPIC]'),
    norepo=True,
    intents={INTENT_READONLY})
def help_(ui, name=None, **opts):
    """show help for a given topic or a help overview

    With no arguments, print a list of commands with short help messages.

    Given a topic, extension, or command name, print help for that
    topic.

    Returns 0 if successful.
    """

    # Platform selectors: taken from --system if given, otherwise derived
    # from the running platform (a generic family name plus the exact
    # lowercased platform string).
    keep = opts.get(r'system') or []
    if not keep:
        plat = pycompat.sysplatform
        if plat.startswith('win'):
            keep.append('windows')
        elif plat == 'OpenVMS':
            keep.append('vms')
        elif plat == 'plan9':
            keep.append('plan9')
        else:
            keep.append('unix')
        keep.append(plat.lower())
    if ui.verbose:
        keep.append('verbose')

    # This very module is handed to the help machinery as the command table.
    thismodule = sys.modules[__name__]
    formatted = help.formattedhelp(ui, thismodule, name, keep=keep, **opts)
    ui.pager('help')
    ui.write(formatted)
3009 3008
@command('identify|id',
    [('r', 'rev', '',
      _('identify the specified revision'), _('REV')),
    ('n', 'num', None, _('show local revision number')),
    ('i', 'id', None, _('show global revision id')),
    ('b', 'branch', None, _('show branch')),
    ('t', 'tags', None, _('show tags')),
    ('B', 'bookmarks', None, _('show bookmarks')),
    ] + remoteopts + formatteropts,
    _('[-nibtB] [-r REV] [SOURCE]'),
    optionalrepo=True,
    intents={INTENT_READONLY})
def identify(ui, repo, source=None, rev=None,
             num=None, id=None, branch=None, tags=None, bookmarks=None, **opts):
    """identify the working directory or specified revision

    Print a summary identifying the repository state at REV using one or
    two parent hash identifiers, followed by a "+" if the working
    directory has uncommitted changes, the branch name (if not default),
    a list of tags, and a list of bookmarks.

    When REV is not given, print a summary of the current state of the
    repository including the working directory. Specify -r. to get information
    of the working directory parent without scanning uncommitted changes.

    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.

    .. container:: verbose

      Examples:

      - generate a build identifier for the working directory::

          hg id --id > build-id.dat

      - find the revision corresponding to a tag::

          hg id -n -r 1.3

      - check the most recent revision of a remote repository::

          hg id -r tip https://www.mercurial-scm.org/repo/hg/

    See :hg:`log` for generating more information about specific revisions,
    including full hash identifiers.

    Returns 0 if successful.
    """

    opts = pycompat.byteskwargs(opts)
    if not repo and not source:
        raise error.Abort(_("there is no Mercurial repository here "
                            "(.hg not found)"))

    # With no selector flag at all, the "default" summary is printed.
    default = not (num or id or branch or tags or bookmarks)
    output = []
    revs = []

    if source:
        # Operate on an explicit repo/bundle path or remote URL instead of
        # the current repository.
        source, branches = hg.parseurl(ui.expandpath(source))
        peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
        repo = peer.local()
        revs, checkout = hg.addbranchrevs(repo, peer, branches, None)

    fm = ui.formatter('identify', opts)
    fm.startitem()

    if not repo:
        # Remote-only path: we can resolve a hash via peer.lookup(), but
        # local revnum/branch/tags are unavailable.
        if num or branch or tags:
            raise error.Abort(
                _("can't query remote revision number, branch, or tags"))
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"

        remoterev = peer.lookup(rev)
        hexrev = fm.hexfunc(remoterev)
        if default or id:
            output = [hexrev]
            fm.data(id=hexrev)

        # Remote bookmark lookup is lazy and cached: it is only paid for
        # when the output actually needs bookmarks.
        @util.cachefunc
        def getbms():
            bms = []

            if 'bookmarks' in peer.listkeys('namespaces'):
                hexremoterev = hex(remoterev)
                bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems()
                       if bmr == hexremoterev]

            return sorted(bms)

        if fm.isplain():
            if bookmarks:
                output.extend(getbms())
            elif default and not ui.quiet:
                # multiple bookmarks for a single parent separated by '/'
                bm = '/'.join(getbms())
                if bm:
                    output.append(bm)
        else:
            fm.data(node=hex(remoterev))
            if bookmarks or 'bookmarks' in fm.datahint():
                fm.data(bookmarks=fm.formatlist(getbms(), name='bookmark'))
    else:
        if rev:
            repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
        ctx = scmutil.revsingle(repo, rev, None)

        if ctx.rev() is None:
            # Working-directory case: identify by parent hash(es), with a
            # trailing '+' when there are uncommitted changes.
            ctx = repo[None]
            parents = ctx.parents()
            taglist = []
            for p in parents:
                taglist.extend(p.tags())

            dirty = ""
            if ctx.dirty(missing=True, merge=False, branch=False):
                dirty = '+'
            fm.data(dirty=dirty)

            hexoutput = [fm.hexfunc(p.node()) for p in parents]
            if default or id:
                output = ["%s%s" % ('+'.join(hexoutput), dirty)]
            fm.data(id="%s%s" % ('+'.join(hexoutput), dirty))

            if num:
                numoutput = ["%d" % p.rev() for p in parents]
                output.append("%s%s" % ('+'.join(numoutput), dirty))

            fm.data(parents=fm.formatlist([fm.hexfunc(p.node())
                                           for p in parents], name='node'))
        else:
            # Specific-revision case.
            hexoutput = fm.hexfunc(ctx.node())
            if default or id:
                output = [hexoutput]
                fm.data(id=hexoutput)

            if num:
                output.append(pycompat.bytestr(ctx.rev()))
            taglist = ctx.tags()

        if default and not ui.quiet:
            # Default summary: branch (if non-default), tags, bookmarks.
            b = ctx.branch()
            if b != 'default':
                output.append("(%s)" % b)

            # multiple tags for a single parent separated by '/'
            t = '/'.join(taglist)
            if t:
                output.append(t)

            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(ctx.bookmarks())
            if bm:
                output.append(bm)
        else:
            if branch:
                output.append(ctx.branch())

            if tags:
                output.extend(taglist)

            if bookmarks:
                output.extend(ctx.bookmarks())

        fm.data(node=ctx.hex())
        fm.data(branch=ctx.branch())
        fm.data(tags=fm.formatlist(taglist, name='tag', sep=':'))
        fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name='bookmark'))
        fm.context(ctx=ctx)

    fm.plain("%s\n" % ' '.join(output))
    fm.end()
3186 3185
@command('import|patch',
    [('p', 'strip', 1,
      _('directory strip option for patch. This has the same '
        'meaning as the corresponding patch option'), _('NUM')),
    ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')),
    ('e', 'edit', False, _('invoke editor on commit messages')),
    ('f', 'force', None,
     _('skip check for outstanding uncommitted changes (DEPRECATED)')),
    ('', 'no-commit', None,
     _("don't commit, just update the working directory")),
    ('', 'bypass', None,
     _("apply patch without touching the working directory")),
    ('', 'partial', None,
     _('commit even if some hunks fail')),
    ('', 'exact', None,
     _('abort if patch would apply lossily')),
    ('', 'prefix', '',
     _('apply patch to subdirectory'), _('DIR')),
    ('', 'import-branch', None,
     _('use any branch information in patch (implied by --exact)'))] +
    commitopts + commitopts2 + similarityopts,
    _('[OPTION]... PATCH...'))
def import_(ui, repo, patch1=None, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually (unless
    --no-commit is specified).

    To read a patch from standard input (stdin), use "-" as the patch
    name. If a URL is specified, the patch will be downloaded from
    there.

    Import first applies changes to the working directory (unless
    --bypass is specified), import will abort if there are outstanding
    changes.

    Use --bypass to apply and commit patches directly to the
    repository, without affecting the working directory. Without
    --exact, patches will be applied on top of the working directory
    parent revision.

    You can import a patch straight from a mail message. Even patches
    as attachments work (to use the body part, it must have type
    text/plain or text/x-patch). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to the commit
    message.

    If the imported patch was generated by :hg:`export`, user and
    description from patch override values from message headers and
    body. Values given on command line with -m/--message and -u/--user
    override these.

    If --exact is specified, import will set the working directory to
    the parent of each patch before applying it, and will abort if the
    resulting changeset has a different ID than the one recorded in
    the patch. This will guard against various ways that portable
    patch formats and mail systems might fail to transfer Mercurial
    data or metadata. See :hg:`bundle` for lossless transmission.

    Use --partial to ensure a changeset will be created from the patch
    even if some hunks fail to apply. Hunks that fail to apply will be
    written to a <target-file>.rej file. Conflicts can then be resolved
    by hand before :hg:`commit --amend` is run to update the created
    changeset. This flag exists to let people import patches that
    partially apply without losing the associated metadata (author,
    date, description, ...).

    .. note::

       When no hunks apply cleanly, :hg:`import --partial` will create
       an empty changeset, importing only the patch metadata.

    With -s/--similarity, hg will attempt to discover renames and
    copies in the patch in the same way as :hg:`addremove`.

    It is possible to use external patch programs to perform the patch
    by setting the ``ui.patch`` configuration option. For the default
    internal tool, the fuzz can also be configured via ``patch.fuzz``.
    See :hg:`help config` for more information about configuration
    files and how to use these options.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    .. container:: verbose

      Examples:

      - import a traditional patch from a website and detect renames::

          hg import -s 80 http://example.com/bugfix.patch

      - import a changeset from an hgweb server::

          hg import https://www.mercurial-scm.org/repo/hg/rev/5ca8c111e9aa

      - import all the patches in an Unix-style mbox::

          hg import incoming-patches.mbox

      - import patches from stdin::

          hg import -

      - attempt to exactly restore an exported changeset (not always
        possible)::

          hg import --exact proposed-fix.patch

      - use an external tool to apply a patch which is too fuzzy for
        the default internal tool.

          hg import --config ui.patch="patch --merge" fuzzy.patch

      - change the default fuzzing from 2 to a less strict 7

          hg import --config ui.fuzz=7 fuzz.patch

    Returns 0 on success, 1 on partial success (see --partial).
    """

    opts = pycompat.byteskwargs(opts)
    if not patch1:
        raise error.Abort(_('need at least one patch to import'))

    patches = (patch1,) + patches

    date = opts.get('date')
    if date:
        opts['date'] = dateutil.parsedate(date)

    exact = opts.get('exact')
    # --bypass means "do not touch the working directory".
    update = not opts.get('bypass')
    if not update and opts.get('no_commit'):
        raise error.Abort(_('cannot use --no-commit with --bypass'))
    # Validate --similarity and the remaining mutually-exclusive flags
    # up front, before taking any locks.
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    if sim and not update:
        raise error.Abort(_('cannot use --similarity with --bypass'))
    if exact:
        if opts.get('edit'):
            raise error.Abort(_('cannot use --exact with --edit'))
        if opts.get('prefix'):
            raise error.Abort(_('cannot use --exact with --prefix'))

    base = opts["base"]
    msgs = []
    ret = 0

    with repo.wlock():
        if update:
            cmdutil.checkunfinished(repo)
            if (exact or not opts.get('force')):
                cmdutil.bailifchanged(repo)

        # Choose the context managers that bracket the import: a real
        # store lock + transaction when committing, or only a dirstate
        # guard when --no-commit leaves the store untouched.
        if not opts.get('no_commit'):
            lock = repo.lock
            tr = lambda: repo.transaction('import')
            dsguard = util.nullcontextmanager
        else:
            lock = util.nullcontextmanager
            tr = util.nullcontextmanager
            dsguard = lambda: dirstateguard.dirstateguard(repo, 'import')
        with lock(), tr(), dsguard():
            parents = repo[None].parents()
            for patchurl in patches:
                if patchurl == '-':
                    ui.status(_('applying patch from stdin\n'))
                    patchfile = ui.fin
                    patchurl = 'stdin'      # for error message
                else:
                    patchurl = os.path.join(base, patchurl)
                    ui.status(_('applying %s\n') % patchurl)
                    patchfile = hg.openpath(ui, patchurl)

                haspatch = False
                # One input file may contain several patches (e.g. an mbox);
                # each hunk is imported as its own changeset.
                for hunk in patch.split(patchfile):
                    with patch.extract(ui, hunk) as patchdata:
                        msg, node, rej = cmdutil.tryimportone(ui, repo,
                                                              patchdata,
                                                              parents, opts,
                                                              msgs, hg.clean)
                    if msg:
                        haspatch = True
                        ui.note(msg + '\n')
                    # Re-read parents so the next patch stacks on what was
                    # just applied/committed.
                    if update or exact:
                        parents = repo[None].parents()
                    else:
                        parents = [repo[node]]
                    if rej:
                        # --partial: some hunks were rejected; report and
                        # stop with exit status 1.
                        ui.write_err(_("patch applied partially\n"))
                        ui.write_err(_("(fix the .rej files and run "
                                       "`hg commit --amend`)\n"))
                        ret = 1
                        break

                if not haspatch:
                    raise error.Abort(_('%s: no diffs found') % patchurl)

            if msgs:
                repo.savecommitmessage('\n* * *\n'.join(msgs))
            return ret
3393 3392
@command('incoming|in',
    [('f', 'force', None,
     _('run even if remote repository is unrelated')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('', 'bundle', '',
     _('file to store the bundles into'), _('FILE')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmarks', False, _("compare bookmarks")),
    ('b', 'branch', [],
     _('a specific branch you would like to pull'), _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    by :hg:`pull` at the time you issued this command.

    See pull for valid source format details.

    .. container:: verbose

      With -B/--bookmarks, the result of bookmark comparison between
      local and remote repositories is displayed. With -v/--verbose,
      status is also displayed for each bookmark like below::

        BM1               01234567890a added
        BM2               1234567890ab advanced
        BM3               234567890abc diverged
        BM4               34567890abcd changed

      The action taken locally when pulling depends on the
      status of each bookmark:

      :``added``: pull will create it
      :``advanced``: pull will update it
      :``diverged``: pull will create a divergent bookmark
      :``changed``: result depends on remote changesets

      From the point of view of pulling behavior, bookmark
      existing only in the remote repository are treated as ``added``,
      even if it is in fact locally deleted.

    .. container:: verbose

      For remote repository, using --bundle avoids downloading the
      changesets twice if the incoming is followed by a pull.

      Examples:

      - show incoming changes with patches and full description::

          hg incoming -vp

      - show incoming changes excluding merges, store a bundle::

          hg in -vpM --bundle incoming.hg
          hg pull incoming.hg

      - briefly list changes inside a bundle::

          hg in changes.hg -T "{desc|firstline}\\n"

    Returns 0 if there are incoming changes, 1 otherwise.
    """
    opts = pycompat.byteskwargs(opts)
    if opts.get('graph'):
        # --graph: render the incoming changesets as an ASCII DAG and
        # return early (exit status 0 hard-coded for this mode).
        logcmdutil.checkunsupportedgraphflags([], opts)
        def display(other, chlist, displayer):
            revdag = logcmdutil.graphrevs(other, chlist, opts)
            logcmdutil.displaygraph(ui, repo, revdag, displayer,
                                    graphmod.asciiedges)

        hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
        return 0

    if opts.get('bundle') and opts.get('subrepos'):
        raise error.Abort(_('cannot combine --bundle and --subrepos'))

    if opts.get('bookmarks'):
        # -B/--bookmarks: compare bookmarks instead of changesets.
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        other = hg.peer(repo, opts, source)
        if 'bookmarks' not in other.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.pager('incoming')
        # hidepassword strips credentials from the URL before display.
        ui.status(_('comparing with %s\n') % util.hidepassword(source))
        return bookmarks.incoming(ui, repo, other)

    # Record the source for subrepo recursion; always cleaned up, even
    # when hg.incoming raises.
    repo._subtoppath = ui.expandpath(source)
    try:
        return hg.incoming(ui, repo, source, opts)
    finally:
        del repo._subtoppath
3490 3489
3491 3490
@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'),
    norepo=True)
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.

    If no directory is given, the current directory is used.

    It is possible to specify an ``ssh://`` URL as the destination.
    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    # Creation is delegated entirely to hg.peer with create=True; the
    # destination may be a local path or an ssh:// URL.
    byteopts = pycompat.byteskwargs(opts)
    path = ui.expandpath(dest)
    hg.peer(ui, byteopts, path, create=True)
3509 3508
@command('locate',
    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
    ] + walkopts,
    _('[OPTION]... [PATTERN]...'))
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns (DEPRECATED)

    Print files under Mercurial control in the working directory whose
    names match the given patterns.

    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".

    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.

    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.

    See :hg:`help files` for a more versatile command.

    Returns 0 if a match is found, 1 otherwise.
    """
    opts = pycompat.byteskwargs(opts)
    # -0/--print0 terminates entries with NUL for xargs -0 consumption.
    end = '\0' if opts.get('print0') else '\n'
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    exitcode = 1
    # badfn silences "no such file" complaints for non-matching patterns.
    matcher = scmutil.match(ctx, pats, opts, default='relglob',
                            badfn=lambda x, y: False)

    ui.pager('locate')
    if ctx.rev() is None:
        # When run on the working copy, "locate" includes removed files, so
        # we get the list of files from the dirstate.
        filesgen = sorted(repo.dirstate.matches(matcher))
    else:
        filesgen = ctx.matches(matcher)
    for path in filesgen:
        if opts.get('fullpath'):
            out = repo.wjoin(path)
        else:
            # Relative form only when patterns were given; fall back to
            # the repo-relative name if m.rel() yields nothing.
            out = (pats and matcher.rel(path)) or path
        ui.write(out, end)
        exitcode = 0

    return exitcode
3564 3563
@command('^log|history',
    [('f', 'follow', None,
     _('follow changeset history, or file history across copies and renames')),
    ('', 'follow-first', None,
     _('only follow the first parent of merge changesets (DEPRECATED)')),
    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
    ('C', 'copies', None, _('show copied files')),
    ('k', 'keyword', [],
     _('do case-insensitive search for a given text'), _('TEXT')),
    ('r', 'rev', [], _('show the specified revision or revset'), _('REV')),
    ('L', 'line-range', [],
     _('follow line range of specified file (EXPERIMENTAL)'),
     _('FILE,RANGE')),
    ('', 'removed', None, _('include revisions where files were removed')),
    ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
    ('u', 'user', [], _('revisions committed by user'), _('USER')),
    ('', 'only-branch', [],
     _('show only changesets within the given named branch (DEPRECATED)'),
     _('BRANCH')),
    ('b', 'branch', [],
     _('show changesets within the given named branch'), _('BRANCH')),
    ('P', 'prune', [],
     _('do not display revision or any of its ancestors'), _('REV')),
    ] + logopts + walkopts,
    _('[OPTION]... [FILE]'),
    inferrepo=True,
    intents={INTENT_READONLY})
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire
    project.

    If no revision range is specified, the default is ``tip:0`` unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.

    File history is shown without following rename or copy history of
    files. Use -f/--follow with a filename to follow history across
    renames and copies. --follow without a filename will only show
    ancestors of the starting revision.

    By default this command prints revision number and changeset id,
    tags, non-trivial parents, user, date and time, and a summary for
    each commit. When the -v/--verbose switch is used, the list of
    changed files and full commit message are shown.

    With --graph the revisions are shown as an ASCII art DAG with the most
    recent changeset at the top.
    'o' is a changeset, '@' is a working directory parent, '_' closes a branch,
    'x' is obsolete, '*' is unstable, and '+' represents a fork where the
    changeset from the lines below is a parent of the 'o' merge on the same
    line.
    Paths in the DAG are represented with '|', '/' and so forth. ':' in place
    of a '|' indicates one or more revisions in a path are omitted.

    .. container:: verbose

       Use -L/--line-range FILE,M:N options to follow the history of lines
       from M to N in FILE. With -p/--patch only diff hunks affecting
       specified line range will be shown. This option requires --follow;
       it can be specified multiple times. Currently, this option is not
       compatible with --graph. This option is experimental.

    .. note::

       :hg:`log --patch` may generate unexpected diff output for merge
       changesets, as it will only compare the merge changeset against
       its first parent. Also, only files different from BOTH parents
       will appear in files:.

    .. note::

       For performance reasons, :hg:`log FILE` may omit duplicate changes
       made on branches and will not show removals or mode changes. To
       see all such changes, use the --removed switch.

    .. container:: verbose

       .. note::

          The history resulting from -L/--line-range options depends on diff
          options; for instance if white-spaces are ignored, respective changes
          with only white-spaces in specified line range will not be listed.

    .. container:: verbose

       Some examples:

       - changesets with full descriptions and file lists::

           hg log -v

       - changesets ancestral to the working directory::

           hg log -f

       - last 10 commits on the current branch::

           hg log -l 10 -b .

       - changesets showing all modifications of a file, including removals::

           hg log --removed file.c

       - all changesets that touch a directory, with diffs, excluding merges::

           hg log -Mp lib/

       - all revision numbers that match a keyword::

           hg log -k bug --template "{rev}\\n"

       - the full hash identifier of the working directory parent::

           hg log -r . --template "{node}\\n"

       - list available log templates::

           hg log -T list

       - check if a given changeset is included in a tagged release::

           hg log -r "a21ccf and ancestor(1.9)"

       - find all changesets by some user in a date range::

           hg log -k alice -d "may 2008 to jul 2008"

       - summary of all changesets after the last tag::

           hg log -r "last(tagged())::" --template "{desc|firstline}\\n"

       - changesets touching lines 13 to 23 for file.c::

           hg log -L file.c,13:23

       - changesets touching lines 13 to 23 for file.c and lines 2 to 6 of
         main.c with patch::

           hg log -L file.c,13:23 -L main.c,2:6 -p

    See :hg:`help dates` for a list of formats valid for -d/--date.

    See :hg:`help revisions` for more about specifying and ordering
    revisions.

    See :hg:`help templates` for more about pre-packaged styles and
    specifying custom templates. The default template used by the log
    command can be customized via the ``ui.logtemplate`` configuration
    setting.

    Returns 0 on success.

    """
    opts = pycompat.byteskwargs(opts)
    linerange = opts.get('line_range')

    # --line-range only makes sense while following file history, and it
    # supplies its own file arguments (FILE,RANGE), so plain FILE patterns
    # are rejected alongside it.
    if linerange and not opts.get('follow'):
        raise error.Abort(_('--line-range requires --follow'))

    if linerange and pats:
        # TODO: take pats as patterns with no line-range filter
        raise error.Abort(
            _('FILE arguments are not compatible with --line-range option')
        )

    repo = scmutil.unhidehashlikerevs(repo, opts.get('rev'), 'nowarn')
    revs, differ = logcmdutil.getrevs(repo, pats, opts)
    if linerange:
        # TODO: should follow file history from logcmdutil._initialrevs(),
        # then filter the result by logcmdutil._makerevset() and --limit
        revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts)

    # Only build the (potentially expensive) rename-lookup function when
    # -C/--copies was requested; bound by the highest displayed rev.
    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if revs:
            endrev = revs.max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    # The pager must be spawned before any output is produced by the
    # displayer below.
    ui.pager('log')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ,
                                              buffered=True)
    if opts.get('graph'):
        displayfn = logcmdutil.displaygraphrevs
    else:
        displayfn = logcmdutil.displayrevs
    displayfn(ui, repo, revs, displayer, getrenamed)
3754 3753
@command('manifest',
    [('r', 'rev', '', _('revision to display'), _('REV')),
     ('', 'all', False, _("list files from all revisions"))]
    + formatteropts,
    _('[-r REV]'),
    intents={INTENT_READONLY})
def manifest(ui, repo, node=None, rev=None, **opts):
    """output the current or given revision of the project manifest

    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if no revision is checked out.

    With -v, print file permissions, symlink and executable bits.
    With --debug, print file revision hashes.

    If option --all is specified, the list of all files from all revisions
    is printed. This includes deleted and renamed files.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    fm = ui.formatter('manifest', opts)

    if opts.get('all'):
        if rev or node:
            raise error.Abort(_("can't specify a revision with --all"))

        # Collect the union of files ever touched, across every revision.
        # Use a loop variable distinct from the 'rev' parameter; the
        # original code reused 'rev' here, silently shadowing the argument.
        res = set()
        for r in repo:
            ctx = repo[r]
            res |= set(ctx.files())

        ui.pager('manifest')
        for f in sorted(res):
            fm.startitem()
            fm.write("path", '%s\n', f)
        fm.end()
        return

    if rev and node:
        raise error.Abort(_("please specify just one revision"))

    if not node:
        node = rev

    # Flag-to-display maps: symlink ('l'), executable ('x'), tree ('t').
    char = {'l': '@', 'x': '*', '': '', 't': 'd'}
    mode = {'l': '644', 'x': '755', '': '644', 't': '755'}
    if node:
        repo = scmutil.unhidehashlikerevs(repo, [node], 'nowarn')
    ctx = scmutil.revsingle(repo, node)
    mf = ctx.manifest()
    ui.pager('manifest')
    for f in ctx:
        fm.startitem()
        fm.context(ctx=ctx)
        fl = ctx[f].flags()
        fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f]))
        fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl])
        fm.write('path', '%s\n', f)
    fm.end()
3816 3815
@command('^merge',
    [('f', 'force', None,
     _('force a merge including outstanding changes (DEPRECATED)')),
    ('r', 'rev', '', _('revision to merge'), _('REV')),
    ('P', 'preview', None,
     _('review revisions to merge (no merge is performed)')),
    ('', 'abort', None, _('abort the ongoing merge')),
    ] + mergetoolopts,
    _('[-P] [[-r] REV]'))
def merge(ui, repo, node=None, **opts):
    """merge another revision into working directory

    The current working directory is updated with all changes made in
    the requested revision since the last common predecessor revision.

    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates to the repository are allowed. The next commit will have
    two parents.

    ``--tool`` can be used to specify the merge tool used for file
    merges. It overrides the HGMERGE environment variable and your
    configuration files. See :hg:`help merge-tools` for options.

    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision with which to merge with must be provided.

    See :hg:`help resolve` for information on handling file conflicts.

    To undo an uncommitted merge, use :hg:`merge --abort` which
    will check out a clean copy of the original merge parent, losing
    all changes.

    Returns 0 on success, 1 if there are unresolved files.
    """

    opts = pycompat.byteskwargs(opts)
    abort = opts.get('abort')
    # --abort only makes sense while a merge is in progress, i.e. while the
    # dirstate has a second parent.
    if abort and repo.dirstate.p2() == nullid:
        cmdutil.wrongtooltocontinue(repo, _('merge'))
    if abort:
        # --abort is exclusive with any revision selection or preview.
        if node:
            raise error.Abort(_("cannot specify a node with --abort"))
        if opts.get('rev'):
            raise error.Abort(_("cannot specify both --rev and --abort"))
        if opts.get('preview'):
            raise error.Abort(_("cannot specify --preview with --abort"))
    if opts.get('rev') and node:
        raise error.Abort(_("please specify just one revision"))
    if not node:
        node = opts.get('rev')

    if node:
        node = scmutil.revsingle(repo, node).node()

    # No explicit revision: let destutil pick the default merge target.
    if not node and not abort:
        node = repo[destutil.destmerge(repo)].node()

    if opts.get('preview'):
        # find nodes that are ancestors of p2 but not of p1
        p1 = repo.lookup('.')
        p2 = node
        nodes = repo.changelog.findmissing(common=[p1], heads=[p2])

        displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
        for node in nodes:
            displayer.show(repo[node])
        displayer.close()
        return 0

    # ui.forcemerge is an internal variable, do not document
    overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
    with ui.configoverride(overrides, 'merge'):
        force = opts.get('force')
        labels = ['working copy', 'merge rev']
        return hg.merge(repo, node, force=force, mergeforce=force,
                        labels=labels, abort=abort)
3896 3895
@command('outgoing|out',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'), _('REV')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('B', 'bookmarks', False, _('compare bookmarks')),
    ('b', 'branch', [], _('a specific branch you would like to push'),
     _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in the destination

    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.

    See pull for details of valid destination formats.

    .. container:: verbose

      With -B/--bookmarks, the result of bookmark comparison between
      local and remote repositories is displayed. With -v/--verbose,
      status is also displayed for each bookmark like below::

        BM1               01234567890a added
        BM2                            deleted
        BM3               234567890abc advanced
        BM4               34567890abcd diverged
        BM5               4567890abcde changed

      The action taken when pushing depends on the
      status of each bookmark:

      :``added``: push with ``-B`` will create it
      :``deleted``: push with ``-B`` will delete it
      :``advanced``: push will update it
      :``diverged``: push with ``-B`` will update it
      :``changed``: push with ``-B`` will update it

      From the point of view of pushing behavior, bookmarks
      existing only in the remote repository are treated as
      ``deleted``, even if it is in fact added remotely.

    Returns 0 if there are outgoing changes, 1 otherwise.
    """
    # hg._outgoing() needs to re-resolve the path in order to handle #branch
    # style URLs, so don't overwrite dest.
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))

    opts = pycompat.byteskwargs(opts)
    if opts.get('graph'):
        # Graph mode: compute outgoing set, then render the DAG. The
        # outgoing hooks still fire whether or not anything was found.
        logcmdutil.checkunsupportedgraphflags([], opts)
        o, other = hg._outgoing(ui, repo, dest, opts)
        if not o:
            cmdutil.outgoinghooks(ui, repo, other, opts, o)
            return

        revdag = logcmdutil.graphrevs(repo, o, opts)
        ui.pager('outgoing')
        displayer = logcmdutil.changesetdisplayer(ui, repo, opts, buffered=True)
        logcmdutil.displaygraph(ui, repo, revdag, displayer,
                                graphmod.asciiedges)
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return 0

    if opts.get('bookmarks'):
        # Bookmark comparison mode: open a peer and diff the bookmark
        # namespaces rather than changesets.
        dest = path.pushloc or path.loc
        other = hg.peer(repo, opts, dest)
        if 'bookmarks' not in other.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(dest))
        ui.pager('outgoing')
        return bookmarks.outgoing(ui, repo, other)

    # _subtoppath lets subrepos resolve their push path relative to the
    # top-level destination; always cleaned up afterwards.
    repo._subtoppath = path.pushloc or path.loc
    try:
        return hg.outgoing(ui, repo, dest, opts)
    finally:
        del repo._subtoppath
3981 3980
@command('parents',
    [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
    ] + templateopts,
    _('[-r REV] [FILE]'),
    inferrepo=True)
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision (DEPRECATED)

    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, the revision in which the file was
    last changed (before the working directory revision or the
    argument to --rev if given) is printed.

    This command is equivalent to::

        hg log -r "p1()+p2()" or
        hg log -r "p1(REV)+p2(REV)" or
        hg log -r "max(::p1() and file(FILE))+max(::p2() and file(FILE))" or
        hg log -r "max(::p1(REV) and file(FILE))+max(::p2(REV) and file(FILE))"

    See :hg:`summary` and :hg:`help revsets` for related information.

    Returns 0 on success.
    """

    opts = pycompat.byteskwargs(opts)
    revspec = opts.get('rev')
    if revspec:
        repo = scmutil.unhidehashlikerevs(repo, [revspec], 'nowarn')
    ctx = scmutil.revsingle(repo, revspec, None)

    if file_:
        # A file argument must name exactly one explicit file (no patterns).
        matcher = scmutil.match(ctx, (file_,), opts)
        if matcher.anypats() or len(matcher.files()) != 1:
            raise error.Abort(_('can only specify an explicit filename'))
        file_ = matcher.files()[0]
        # Gather the file's node in each parent that actually contains it;
        # a parent lacking the file (LookupError) is simply skipped.
        fnodes = []
        for pctx in ctx.parents():
            if not pctx:
                continue
            try:
                fnodes.append(pctx.filenode(file_))
            except error.LookupError:
                pass
        if not fnodes:
            raise error.Abort(_("'%s' not found in manifest!") % file_)
        # Map each file node back to the changeset that introduced it.
        shownodes = [repo.filectx(file_, fileid=fnode).node()
                     for fnode in fnodes]
    else:
        shownodes = [pctx.node() for pctx in ctx.parents()]

    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    for n in shownodes:
        if n == nullid:
            continue
        displayer.show(repo[n])
    displayer.close()
4041 4040
@command('paths', formatteropts, _('[NAME]'), optionalrepo=True,
    intents={INTENT_READONLY})
def paths(ui, repo, search=None, **opts):
    """show aliases for remote repositories

    Show definition of symbolic path name NAME. If no name is given,
    show definition of all available names.

    Option -q/--quiet suppresses all output when searching for NAME
    and shows only the path names when listing all definitions.

    Path names are defined in the [paths] section of your
    configuration file and in ``/etc/mercurial/hgrc``. If run inside a
    repository, ``.hg/hgrc`` is used, too.

    The path names ``default`` and ``default-push`` have a special
    meaning.  When performing a push or pull operation, they are used
    as fallbacks if no location is specified on the command-line.
    When ``default-push`` is set, it will be used for push and
    ``default`` will be used for pull; otherwise ``default`` is used
    as the fallback for both.  When cloning a repository, the clone
    source is written as ``default`` in ``.hg/hgrc``.

    .. note::

       ``default`` and ``default-push`` apply to all inbound (e.g.
       :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email`
       and :hg:`bundle`) operations.

    See :hg:`help urls` for more information.

    Returns 0 on success.
    """

    opts = pycompat.byteskwargs(opts)
    ui.pager('paths')
    if search:
        # Exact-name lookup; an empty result is reported at the end.
        pathitems = [(name, path) for name, path in ui.paths.iteritems()
                     if name == search]
    else:
        pathitems = sorted(ui.paths.iteritems())

    fm = ui.formatter('paths', opts)
    if fm.isplain():
        hidepassword = util.hidepassword
    else:
        # Machine-readable output (e.g. JSON) keeps the raw URL; only
        # plain terminal output masks embedded passwords.
        hidepassword = bytes
    if ui.quiet:
        namefmt = '%s\n'
    else:
        namefmt = '%s = '
    showsubopts = not search and not ui.quiet

    for name, path in pathitems:
        fm.startitem()
        fm.condwrite(not search, 'name', namefmt, name)
        fm.condwrite(not ui.quiet, 'url', '%s\n', hidepassword(path.rawloc))
        for subopt, value in sorted(path.suboptions.items()):
            assert subopt not in ('name', 'url')
            if showsubopts:
                fm.plain('%s:%s = ' % (name, subopt))
            fm.condwrite(showsubopts, subopt, '%s\n', value)

    fm.end()

    if search and not pathitems:
        if not ui.quiet:
            ui.warn(_("not found!\n"))
        return 1
    else:
        return 0
4113 4112
@command('phase',
    [('p', 'public', False, _('set changeset phase to public')),
     ('d', 'draft', False, _('set changeset phase to draft')),
     ('s', 'secret', False, _('set changeset phase to secret')),
     ('f', 'force', False, _('allow to move boundary backward')),
     ('r', 'rev', [], _('target revision'), _('REV')),
    ],
    _('[-p|-d|-s] [-f] [-r] [REV...]'))
def phase(ui, repo, *revs, **opts):
    """set or show the current phase name

    With no argument, show the phase name of the current revision(s).

    With one of -p/--public, -d/--draft or -s/--secret, change the
    phase value of the specified revisions.

    Unless -f/--force is specified, :hg:`phase` won't move changesets from a
    lower phase to a higher phase. Phases are ordered as follows::

        public < draft < secret

    Returns 0 on success, 1 if some phases could not be changed.

    (For more information about the phases concept, see :hg:`help phases`.)
    """
    opts = pycompat.byteskwargs(opts)
    # search for a unique phase argument
    targetphase = None
    for idx, name in enumerate(phases.phasenames):
        if opts.get(name, False):
            if targetphase is not None:
                raise error.Abort(_('only one phase can be specified'))
            targetphase = idx

    # look for specified revision
    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        # display both parents as the second parent phase can influence
        # the phase of a merge commit
        revs = [c.rev() for c in repo[None].parents()]

    revs = scmutil.revrange(repo, revs)

    ret = 0
    if targetphase is None:
        # display
        for r in revs:
            ctx = repo[r]
            ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr()))
    else:
        with repo.lock(), repo.transaction("phase") as tr:
            # set phase
            if not revs:
                raise error.Abort(_('empty revision set'))
            nodes = [repo[r].node() for r in revs]
            # moving revision from public to draft may hide them
            # We have to check result on an unfiltered repository
            unfi = repo.unfiltered()
            # Snapshot all phases before the move so we can report how many
            # changesets actually changed afterwards.
            getphase = unfi._phasecache.phase
            olddata = [getphase(unfi, r) for r in unfi]
            phases.advanceboundary(repo, tr, targetphase, nodes)
            if opts['force']:
                # --force additionally allows moving the boundary backward
                # (towards a higher phase).
                phases.retractboundary(repo, tr, targetphase, nodes)
        # Re-read phases after the transaction committed; compare against
        # the snapshot to count changes and detect rejected moves.
        getphase = unfi._phasecache.phase
        newdata = [getphase(unfi, r) for r in unfi]
        changes = sum(newdata[r] != olddata[r] for r in unfi)
        cl = unfi.changelog
        rejected = [n for n in nodes
                    if newdata[cl.rev(n)] < targetphase]
        if rejected:
            ui.warn(_('cannot move %i changesets to a higher '
                      'phase, use --force\n') % len(rejected))
            ret = 1
        if changes:
            msg = _('phase changed for %i changesets\n') % changes
            if ret:
                ui.status(msg)
            else:
                ui.note(msg)
        else:
            ui.warn(_('no phases changed\n'))
    return ret
4197 4196
def postincoming(ui, repo, modheads, optupdate, checkout, brev):
    """Run after a changegroup has been added via pull/unbundle

    This takes arguments below:

    :modheads: change of heads by pull/unbundle
    :optupdate: updating working directory is needed or not
    :checkout: update destination revision (or None to default destination)
    :brev: a name, which might be a bookmark to be activated after updating
    """
    # NOTE: modheads may be None (unknown); only the exact value 0 means
    # "nothing happened", so compare with == rather than truthiness.
    if modheads == 0:
        return
    if optupdate:
        try:
            return hg.updatetotally(ui, repo, checkout, brev)
        except error.UpdateAbort as inst:
            msg = _("not updating: %s") % stringutil.forcebytestr(inst)
            raise error.UpdateAbort(msg, hint=inst.hint)
    if modheads > 1:
        # Pick the hint that matches the head topology after the pull.
        nbranchheads = len(repo.branchheads())
        if nbranchheads == modheads:
            hintmsg = _("(run 'hg heads' to see heads, 'hg merge' to merge)\n")
        elif nbranchheads > 1:
            hintmsg = _("(run 'hg heads .' to see heads, 'hg merge' to "
                        "merge)\n")
        else:
            hintmsg = _("(run 'hg heads' to see heads)\n")
        ui.status(hintmsg)
    elif not ui.configbool('commands', 'update.requiredest'):
        ui.status(_("(run 'hg update' to get a working copy)\n"))
4228 4227
@command('^pull',
    [('u', 'update', None,
     _('update to new branch head if new descendants were pulled')),
    ('f', 'force', None, _('run even when remote repository is unrelated')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
    ('b', 'branch', [], _('a specific branch you would like to pull'),
     _('BRANCH')),
    ] + remoteopts,
    _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to a local repository (the current one unless
    -R is specified). By default, this does not update the copy of the
    project in the working directory.

    When cloning from servers that support it, Mercurial may fetch
    pre-generated data. When this is done, hooks operating on incoming
    changesets and changegroups may fire more than once, once for each
    pre-generated bundle and as well as for any additional remaining
    data. See :hg:`help -e clonebundles` for more.

    Use :hg:`incoming` if you want to see what would have been added
    by a pull at the time you issued this command. If you then decide
    to add those changes to the repository, you should use :hg:`pull
    -r X` where ``X`` is the last changeset listed by :hg:`incoming`.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    Specifying bookmark as ``.`` is equivalent to specifying the active
    bookmark's name.

    Returns 0 on success, 1 if an update had unresolved files.
    """

    opts = pycompat.byteskwargs(opts)
    if ui.configbool('commands', 'update.requiredest') and opts.get('update'):
        msg = _('update destination required by configuration')
        hint = _('use hg pull followed by hg update DEST')
        raise error.Abort(msg, hint=hint)

    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    ui.status(_('pulling from %s\n') % util.hidepassword(source))
    other = hg.peer(repo, opts, source)
    try:
        revs, checkout = hg.addbranchrevs(repo, other, branches,
                                          opts.get('rev'))


        pullopargs = {}
        if opts.get('bookmark'):
            if not revs:
                revs = []
            # The list of bookmark used here is not the one used to actually
            # update the bookmark name. This can result in the revision pulled
            # not ending up with the name of the bookmark because of a race
            # condition on the server. (See issue 4689 for details)
            remotebookmarks = other.listkeys('bookmarks')
            remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
            pullopargs['remotebookmarks'] = remotebookmarks
            for b in opts['bookmark']:
                b = repo._bookmarks.expandname(b)
                if b not in remotebookmarks:
                    raise error.Abort(_('remote bookmark %s not found!') % b)
                revs.append(hex(remotebookmarks[b]))

        if revs:
            try:
                # When 'rev' is a bookmark name, we cannot guarantee that it
                # will be updated with that name because of a race condition
                # server side. (See issue 4689 for details)
                oldrevs = revs
                revs = [] # actually, nodes
                for r in oldrevs:
                    with other.commandexecutor() as e:
                        node = e.callcommand('lookup', {'key': r}).result()

                    revs.append(node)
                    if r == checkout:
                        checkout = node
            except error.CapabilityError:
                err = _("other repository doesn't support revision lookup, "
                        "so a rev cannot be specified.")
                raise error.Abort(err)

        # Only take the wlock when we may update the working directory;
        # nullcontextmanager keeps the 'with' block uniform otherwise.
        wlock = util.nullcontextmanager()
        if opts.get('update'):
            wlock = repo.wlock()
        with wlock:
            pullopargs.update(opts.get('opargs', {}))
            modheads = exchange.pull(repo, other, heads=revs,
                                     force=opts.get('force'),
                                     bookmarks=opts.get('bookmark', ()),
                                     opargs=pullopargs).cgresult

            # brev is a name, which might be a bookmark to be activated at
            # the end of the update. In other words, it is an explicit
            # destination of the update
            brev = None

            if checkout:
                checkout = repo.changelog.rev(checkout)

                # order below depends on implementation of
                # hg.addbranchrevs(). opts['bookmark'] is ignored,
                # because 'checkout' is determined without it.
                if opts.get('rev'):
                    brev = opts['rev'][0]
                elif opts.get('branch'):
                    brev = opts['branch'][0]
                else:
                    brev = branches[0]
            # _subtoppath lets subrepos resolve their pull source relative
            # to the top-level one; always cleaned up afterwards.
            repo._subtoppath = source
            try:
                ret = postincoming(ui, repo, modheads, opts.get('update'),
                                   checkout, brev)

            finally:
                del repo._subtoppath

    finally:
        other.close()
    return ret
4357 4356
@command('^push',
    [('f', 'force', None, _('force push')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'),
     _('REV')),
    ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
    ('b', 'branch', [],
     _('a specific branch you would like to push'), _('BRANCH')),
    ('', 'new-branch', False, _('allow pushing a new branch')),
    ('', 'pushvars', [], _('variables that can be sent to server (ADVANCED)')),
    ] + remoteopts,
    _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination

    Push changesets from the local repository to the specified
    destination.

    This operation is symmetrical to pull: it is identical to a pull
    in the destination repository from the current one.

    By default, push will not allow creation of new heads at the
    destination, since multiple heads would make it unclear which head
    to use. In this situation, it is recommended to pull and merge
    before pushing.

    Use --new-branch if you want to allow push to create a new named
    branch that is not present at the destination. This allows you to
    only create a new branch without forcing other changes.

    .. note::

       Extra care should be taken with the -f/--force option,
       which will push all new heads on all branches, an action which will
       almost always cause confusion for collaborators.

    If -r/--rev is used, the specified revision and all its ancestors
    will be pushed to the remote repository.

    If -B/--bookmark is used, the specified bookmarked revision, its
    ancestors, and the bookmark will be pushed to the remote
    repository. Specifying ``.`` is equivalent to specifying the active
    bookmark's name.

    Please see :hg:`help urls` for important details about ``ssh://``
    URLs. If DESTINATION is omitted, a default path will be used.

    .. container:: verbose

        The --pushvars option sends strings to the server that become
        environment variables prepended with ``HG_USERVAR_``. For example,
        ``--pushvars ENABLE_FEATURE=true``, provides the server side hooks with
        ``HG_USERVAR_ENABLE_FEATURE=true`` as part of their environment.

        pushvars can provide for user-overridable hooks as well as set debug
        levels. One example is having a hook that blocks commits containing
        conflict markers, but enables the user to override the hook if the file
        is using conflict markers for testing purposes or the file format has
        strings that look like conflict markers.

        By default, servers will ignore `--pushvars`. To enable it add the
        following to your configuration file::

            [push]
            pushvars.server = true

    Returns 0 if push was successful, 1 if nothing to push.
    """

    # command tables hand us str keys under py3; normalize to bytes
    opts = pycompat.byteskwargs(opts)
    if opts.get('bookmark'):
        # record which bookmarks we intend to push so the exchange layer
        # (and hooks) can see them
        ui.setconfig('bookmarks', 'pushing', opts['bookmark'], 'push')
        for b in opts['bookmark']:
            # translate -B options to -r so changesets get pushed
            b = repo._bookmarks.expandname(b)
            if b in repo._bookmarks:
                opts.setdefault('rev', []).append(b)
            else:
                # if we try to push a deleted bookmark, translate it to null
                # this lets simultaneous -r, -b options continue working
                opts.setdefault('rev', []).append("null")

    # resolve DEST: explicit argument, else paths.default-push, else
    # paths.default
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get('branch') or [])
    ui.status(_('pushing to %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
        if not revs:
            raise error.Abort(_("specified revisions evaluate to an empty set"),
                              hint=_("use different revision arguments"))
    elif path.pushrev:
        # It doesn't make any sense to specify ancestor revisions. So limit
        # to DAG heads to make discovery simpler.
        expr = revsetlang.formatspec('heads(%r)', path.pushrev)
        revs = scmutil.revrange(repo, [expr])
        revs = [repo[rev].node() for rev in revs]
        if not revs:
            raise error.Abort(_('default push revset for path evaluates to an '
                                'empty set'))

    # _subtoppath lets nested subrepo pushes compute their destination
    # relative to ours; always cleaned up below
    repo._subtoppath = dest
    try:
        # push subrepos depth-first for coherent ordering
        c = repo['.']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            result = c.sub(s).push(opts)
            # a subrepo push returning 0 means failure; abort the whole
            # push with exit status 1 (= not 0)
            if result == 0:
                return not result
    finally:
        del repo._subtoppath

    opargs = dict(opts.get('opargs', {})) # copy opargs since we may mutate it
    opargs.setdefault('pushvars', []).extend(opts.get('pushvars', []))

    pushop = exchange.push(repo, other, opts.get('force'), revs=revs,
                           newbranch=opts.get('new_branch'),
                           bookmarks=opts.get('bookmark', ()),
                           opargs=opargs)

    # cgresult is truthy when changesets were pushed; exit 0 on success,
    # 1 when there was nothing to push
    result = not pushop.cgresult

    if pushop.bkresult is not None:
        # bookmark push failures escalate the exit status to 2
        if pushop.bkresult == 2:
            result = 2
        elif not result and pushop.bkresult:
            result = 2

    return result
4494 4493
@command('recover', [])
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.

    Returns 0 if successful, 1 if nothing to recover or verify fails.
    """
    # Guard clause: nothing to recover -> exit status 1.
    if not repo.recover():
        return 1
    # A transaction was rolled back; re-verify the repository and
    # propagate verify's exit status.
    return hg.verify(repo)
4510 4509
@command('^remove|rm',
    [('A', 'after', None, _('record delete for missing files')),
    ('f', 'force', None,
     _('forget added files, delete modified files')),
    ] + subrepoopts + walkopts + dryrunopts,
    _('[OPTION]... FILE...'),
    inferrepo=True)
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the current branch.

    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see :hg:`revert`. To undo added
    files, see :hg:`forget`.

    .. container:: verbose

      -A/--after can be used to remove only files that have already
      been deleted, -f/--force can be used to force deletion, and -Af
      can be used to remove files from the next revision without
      deleting them from the working directory.

      The following table details the behavior of remove for different
      file states (columns) and option combinations (rows). The file
      states are Added [A], Clean [C], Modified [M] and Missing [!]
      (as reported by :hg:`status`). The actions are Warn, Remove
      (from branch) and Delete (from disk):

      ========= == == == ==
      opt/state A  C  M  !
      ========= == == == ==
      none      W  RD W  R
      -f        R  RD RD R
      -A        W  W  W  R
      -Af       R  R  R  R
      ========= == == == ==

      .. note::

         :hg:`remove` never deletes files in Added [A] state from the
         working directory, not even if ``--force`` is specified.

    Returns 0 on success, 1 if any warnings encountered.
    """

    # normalize str option keys to bytes for internal consumption
    opts = pycompat.byteskwargs(opts)
    after = opts.get('after')
    force = opts.get('force')
    # without --after, at least one file pattern is mandatory
    if not (pats or after):
        raise error.Abort(_('no files specified'))

    # build the matcher against the working copy and delegate the
    # actual state-table logic to cmdutil.remove
    matcher = scmutil.match(repo[None], pats, opts)
    return cmdutil.remove(ui, repo, matcher, "", after, force,
                          opts.get('subrepos'), dryrun=opts.get('dry_run'))
4567 4566
@command('rename|move|mv',
    [('A', 'after', None, _('record a rename that has already occurred')),
    ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... SOURCE... DEST'))
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect at the next commit. To undo a rename
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    opts = pycompat.byteskwargs(opts)
    # rename is copy-with-removal: delegate to cmdutil.copy under the
    # working-directory lock and return its status once the lock is
    # released
    with repo.wlock(False):
        ret = cmdutil.copy(ui, repo, pats, opts, rename=True)
    return ret
4592 4591
@command('resolve',
    [('a', 'all', None, _('select all unresolved files')),
    ('l', 'list', None, _('list state of files needing merge')),
    ('m', 'mark', None, _('mark files as resolved')),
    ('u', 'unmark', None, _('mark files as unresolved')),
    ('n', 'no-status', None, _('hide status prefix')),
    ('', 're-merge', None, _('re-merge files'))]
    + mergetoolopts + walkopts + formatteropts,
    _('[OPTION]... [FILE]...'),
    inferrepo=True)
def resolve(ui, repo, *pats, **opts):
    """redo merges or set/view the merge status of files

    Merges with unresolved conflicts are often the result of
    non-interactive merging using the ``internal:merge`` configuration
    setting, or a command-line merge tool like ``diff3``. The resolve
    command is used to manage the files involved in a merge, after
    :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
    working directory must have two parents). See :hg:`help
    merge-tools` for information on configuring merge tools.

    The resolve command can be used in the following ways:

    - :hg:`resolve [--re-merge] [--tool TOOL] FILE...`: attempt to re-merge
      the specified files, discarding any previous merge attempts. Re-merging
      is not performed for files already marked as resolved. Use ``--all/-a``
      to select all unresolved files. ``--tool`` can be used to specify
      the merge tool used for the given files. It overrides the HGMERGE
      environment variable and your configuration files. Previous file
      contents are saved with a ``.orig`` suffix.

    - :hg:`resolve -m [FILE]`: mark a file as having been resolved
      (e.g. after having manually fixed-up the files). The default is
      to mark all unresolved files.

    - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
      default is to mark all resolved files.

    - :hg:`resolve -l`: list files which had or still have conflicts.
      In the printed list, ``U`` = unresolved and ``R`` = resolved.
      You can use ``set:unresolved()`` or ``set:resolved()`` to filter
      the list. See :hg:`help filesets` for details.

    .. note::

       Mercurial will not let you commit files with unresolved merge
       conflicts. You must use :hg:`resolve -m ...` before you can
       commit after a conflicting merge.

    Returns 0 on success, 1 if any files fail a resolve attempt.
    """

    opts = pycompat.byteskwargs(opts)
    confirm = ui.configbool('commands', 'resolve.confirm')
    # unpack the mutually-exclusive action flags in one pass
    # (NOTE: 'all' intentionally shadows the builtin here, matching the
    # file's existing convention for option names)
    flaglist = 'all mark unmark list no_status re_merge'.split()
    all, mark, unmark, show, nostatus, remerge = \
        [opts.get(o) for o in flaglist]

    # at most one of --list/--mark/--unmark/--re-merge may be given
    actioncount = len(list(filter(None, [show, mark, unmark, remerge])))
    if actioncount > 1:
        raise error.Abort(_("too many actions specified"))
    elif (actioncount == 0
          and ui.configbool('commands', 'resolve.explicit-re-merge')):
        hint = _('use --mark, --unmark, --list or --re-merge')
        raise error.Abort(_('no action specified'), hint=hint)
    if pats and all:
        raise error.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise error.Abort(_('no files or directories specified'),
                          hint=('use --all to re-merge all unresolved files'))

    # commands.resolve.confirm asks before acting on *every* file
    if confirm:
        if all:
            if ui.promptchoice(_(b're-merge all unresolved files (yn)?'
                                 b'$$ &Yes $$ &No')):
                raise error.Abort(_('user quit'))
        if mark and not pats:
            if ui.promptchoice(_(b'mark all unresolved files as resolved (yn)?'
                                 b'$$ &Yes $$ &No')):
                raise error.Abort(_('user quit'))
        if unmark and not pats:
            if ui.promptchoice(_(b'mark all resolved files as unresolved (yn)?'
                                 b'$$ &Yes $$ &No')):
                raise error.Abort(_('user quit'))

    # --list: read-only display of the merge state, no wlock needed
    if show:
        ui.pager('resolve')
        fm = ui.formatter('resolve', opts)
        ms = mergemod.mergestate.read(repo)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # Labels and keys based on merge state. Unresolved path conflicts show
        # as 'P'. Resolved path conflicts show as 'R', the same as normal
        # resolved conflicts.
        mergestateinfo = {
            mergemod.MERGE_RECORD_UNRESOLVED: ('resolve.unresolved', 'U'),
            mergemod.MERGE_RECORD_RESOLVED: ('resolve.resolved', 'R'),
            mergemod.MERGE_RECORD_UNRESOLVED_PATH: ('resolve.unresolved', 'P'),
            mergemod.MERGE_RECORD_RESOLVED_PATH: ('resolve.resolved', 'R'),
            mergemod.MERGE_RECORD_DRIVER_RESOLVED: ('resolve.driverresolved',
                                                    'D'),
        }

        for f in ms:
            if not m(f):
                continue

            label, key = mergestateinfo[ms[f]]
            fm.startitem()
            fm.context(ctx=wctx)
            fm.condwrite(not nostatus, 'mergestatus', '%s ', key, label=label)
            fm.write('path', '%s\n', f, label=label)
        fm.end()
        return 0

    # all mutating actions run under the working-directory lock
    with repo.wlock():
        ms = mergemod.mergestate.read(repo)

        # dirstate.p2() != nullid means a merge is in progress even if the
        # merge state file itself is inactive
        if not (ms.active() or repo.dirstate.p2() != nullid):
            raise error.Abort(
                _('resolve command not applicable when not merging'))

        wctx = repo[None]

        # give an experimental merge driver a chance to preprocess before
        # we touch any files
        if (ms.mergedriver
            and ms.mdstate() == mergemod.MERGE_DRIVER_STATE_UNMARKED):
            proceed = mergemod.driverpreprocess(repo, ms, wctx)
            ms.commit()
            # allow mark and unmark to go through
            if not mark and not unmark and not proceed:
                return 1

        m = scmutil.match(wctx, pats, opts)
        ret = 0            # overall exit status; 1 if any resolve fails
        didwork = False    # whether any file matched the patterns
        runconclude = False

        tocomplete = []          # files whose preresolve needs a second pass
        hasconflictmarkers = []  # files marked resolved but still conflicted
        if mark:
            # commands.resolve.mark-check: 'warn'/'abort' on leftover markers
            markcheck = ui.config('commands', 'resolve.mark-check')
            if markcheck not in ['warn', 'abort']:
                # Treat all invalid / unrecognized values as 'none'.
                markcheck = False
        for f in ms:
            if not m(f):
                continue

            didwork = True

            # don't let driver-resolved files be marked, and run the conclude
            # step if asked to resolve
            if ms[f] == mergemod.MERGE_RECORD_DRIVER_RESOLVED:
                exact = m.exact(f)
                if mark:
                    if exact:
                        ui.warn(_('not marking %s as it is driver-resolved\n')
                                % f)
                elif unmark:
                    if exact:
                        ui.warn(_('not unmarking %s as it is driver-resolved\n')
                                % f)
                else:
                    runconclude = True
                continue

            # path conflicts must be resolved manually
            if ms[f] in (mergemod.MERGE_RECORD_UNRESOLVED_PATH,
                         mergemod.MERGE_RECORD_RESOLVED_PATH):
                if mark:
                    ms.mark(f, mergemod.MERGE_RECORD_RESOLVED_PATH)
                elif unmark:
                    ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED_PATH)
                elif ms[f] == mergemod.MERGE_RECORD_UNRESOLVED_PATH:
                    ui.warn(_('%s: path conflict must be resolved manually\n')
                            % f)
                continue

            if mark:
                if markcheck:
                    # scan the on-disk file for leftover conflict markers
                    with repo.wvfs(f) as fobj:
                        fdata = fobj.read()
                    if filemerge.hasconflictmarkers(fdata) and \
                        ms[f] != mergemod.MERGE_RECORD_RESOLVED:
                        hasconflictmarkers.append(f)
                ms.mark(f, mergemod.MERGE_RECORD_RESOLVED)
            elif unmark:
                ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED)
            else:
                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                try:
                    util.copyfile(a, a + ".resolve")
                except (IOError, OSError) as inst:
                    # the working copy may legitimately be missing (ENOENT)
                    if inst.errno != errno.ENOENT:
                        raise

                try:
                    # preresolve file
                    overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
                    with ui.configoverride(overrides, 'resolve'):
                        complete, r = ms.preresolve(f, wctx)
                    if not complete:
                        tocomplete.append(f)
                    elif r:
                        ret = 1
                finally:
                    # persist merge-state progress even if preresolve raised
                    ms.commit()

                # replace filemerge's .orig file with our resolve file, but only
                # for merges that are complete
                if complete:
                    try:
                        util.rename(a + ".resolve",
                                    scmutil.origpath(ui, repo, a))
                    except OSError as inst:
                        if inst.errno != errno.ENOENT:
                            raise

        if hasconflictmarkers:
            ui.warn(_('warning: the following files still have conflict '
                      'markers:\n ') + '\n '.join(hasconflictmarkers) + '\n')
            if markcheck == 'abort' and not all:
                raise error.Abort(_('conflict markers detected'),
                                  hint=_('use --all to mark anyway'))

        # second pass: finish merges that preresolve could not complete
        for f in tocomplete:
            try:
                # resolve file
                overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
                with ui.configoverride(overrides, 'resolve'):
                    r = ms.resolve(f, wctx)
                if r:
                    ret = 1
            finally:
                ms.commit()

            # replace filemerge's .orig file with our resolve file
            a = repo.wjoin(f)
            try:
                util.rename(a + ".resolve", scmutil.origpath(ui, repo, a))
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        ms.commit()
        ms.recordactions()

        if not didwork and pats:
            # nothing matched: try to construct a helpful "did you mean"
            # hint by re-matching with explicit path: patterns
            hint = None
            if not any([p for p in pats if p.find(':') >= 0]):
                pats = ['path:%s' % p for p in pats]
                m = scmutil.match(wctx, pats, opts)
                for f in ms:
                    if not m(f):
                        continue
                    def flag(o):
                        if o == 're_merge':
                            return '--re-merge '
                        return '-%s ' % o[0:1]
                    flags = ''.join([flag(o) for o in flaglist if opts.get(o)])
                    hint = _("(try: hg resolve %s%s)\n") % (
                             flags,
                             ' '.join(pats))
                    break
            ui.warn(_("arguments do not match paths that need resolving\n"))
            if hint:
                ui.warn(hint)
        elif ms.mergedriver and ms.mdstate() != 's':
            # run conclude step when either a driver-resolved file is requested
            # or there are no driver-resolved files
            # we can't use 'ret' to determine whether any files are unresolved
            # because we might not have tried to resolve some
            if ((runconclude or not list(ms.driverresolved()))
                and not list(ms.unresolved())):
                proceed = mergemod.driverconclude(repo, ms, wctx)
                ms.commit()
                if not proceed:
                    return 1

        # Nudge users into finishing an unfinished operation
        unresolvedf = list(ms.unresolved())
        driverresolvedf = list(ms.driverresolved())
        if not unresolvedf and not driverresolvedf:
            ui.status(_('(no more unresolved files)\n'))
            cmdutil.checkafterresolved(repo)
        elif not unresolvedf:
            ui.status(_('(no more unresolved files -- '
                        'run "hg resolve --all" to conclude)\n'))

    return ret
4885 4884
@command('revert',
    [('a', 'all', None, _('revert all changes when no arguments given')),
    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
    ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
    ('C', 'no-backup', None, _('do not save backup copies of files')),
    ('i', 'interactive', None, _('interactively select the changes')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [-r REV] [NAME]...'))
def revert(ui, repo, *pats, **opts):
    """restore files to their checkout state

    .. note::

       To check out earlier revisions, you should use :hg:`update REV`.
       To cancel an uncommitted merge (and lose your changes),
       use :hg:`merge --abort`.

    With no revision specified, revert the specified files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify a
    revision.

    Using the -r/--rev or -d/--date options, revert the given files or
    directories to their states as of a specific revision. Because
    revert does not change the working directory parents, this will
    cause these files to appear modified. This can be helpful to "back
    out" some or all of an earlier change. See :hg:`backout` for a
    related method.

    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup. It is possible to store
    the backup files in a custom directory relative to the root of the
    repository by setting the ``ui.origbackuppath`` configuration
    option.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    See :hg:`help backout` for a way to reverse the effect of an
    earlier changeset.

    Returns 0 on success.
    """

    opts = pycompat.byteskwargs(opts)
    # --date is translated into a --rev; the two are mutually exclusive
    if opts.get("date"):
        if opts.get("rev"):
            raise error.Abort(_("you can't specify a revision and a date"))
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])

    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        # revert after merge is a trap for new users (issue2915)
        raise error.Abort(_('uncommitted merge with no revision specified'),
                          hint=_("use 'hg update' or see 'hg help revert'"))

    rev = opts.get('rev')
    if rev:
        # allow hash-like revs to reach into the hidden set with a warning
        # suppressed; repo may be replaced by an unfiltered view here
        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
    ctx = scmutil.revsingle(repo, rev)

    # no file selection and neither --all nor --interactive: refuse, but
    # try to give the most specific hint for the current situation
    if (not (pats or opts.get('include') or opts.get('exclude') or
             opts.get('all') or opts.get('interactive'))):
        msg = _("no files or directories specified")
        if p2 != nullid:
            hint = _("uncommitted merge, use --all to discard all changes,"
                     " or 'hg update -C .' to abort the merge")
            raise error.Abort(msg, hint=hint)
        dirty = any(repo.status())
        node = ctx.node()
        if node != parent:
            if dirty:
                hint = _("uncommitted changes, use --all to discard all"
                         " changes, or 'hg update %d' to update") % ctx.rev()
            else:
                hint = _("use --all to revert all files,"
                         " or 'hg update %d' to update") % ctx.rev()
        elif dirty:
            hint = _("uncommitted changes, use --all to discard all changes")
        else:
            hint = _("use --all to revert all files")
        raise error.Abort(msg, hint=hint)

    # delegate the actual working-copy mutation to cmdutil.revert
    return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats,
                          **pycompat.strkwargs(opts))
4972 4971
@command('rollback', dryrunopts +
         [('f', 'force', False, _('ignore safety measures'))])
def rollback(ui, repo, **opts):
    """roll back the last transaction (DANGEROUS) (DEPRECATED)

    Please use :hg:`commit --amend` instead of rollback to correct
    mistakes in the last commit.

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time. This command does not alter
    the working directory.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository.

    .. container:: verbose

      For example, the following commands are transactional, and their
      effects can be rolled back:

      - commit
      - import
      - pull
      - push (with this repository as the destination)
      - unbundle

      To avoid permanent data loss, rollback will refuse to rollback a
      commit transaction if it isn't checked out. Use --force to
      override this protection.

      The rollback command can be entirely disabled by setting the
      ``ui.rollback`` configuration setting to false. If you're here
      because you want to use rollback and it's disabled, you can
      re-enable the command by setting ``ui.rollback`` to true.

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.

    Returns 0 on success, 1 if no rollback data is available.
    """
    # respect the ui.rollback kill switch before doing anything
    if not ui.configbool('ui', 'rollback'):
        raise error.Abort(_('rollback is disabled because it is unsafe'),
                          hint=('see `hg help -v rollback` for information'))
    # opts still carries str keys here, hence the r'' lookups
    dryrun = opts.get(r'dry_run')
    force = opts.get(r'force')
    return repo.rollback(dryrun=dryrun, force=force)
5025 5024
@command('root', [], intents={INTENT_READONLY})
def root(ui, repo):
    """print the root (top) of the current working directory

    Print the root directory of the current repository.

    Returns 0 on success.
    """
    # repo.root is an absolute path; emit it with a trailing newline
    rootpath = repo.root
    ui.write(rootpath + "\n")
5035 5034
@command('^serve',
    [('A', 'accesslog', '', _('name of access log file to write to'),
     _('FILE')),
    ('d', 'daemon', None, _('run server in background')),
    ('', 'daemon-postexec', [], _('used internally by daemon mode')),
    ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
    # use string type, then we can check if something was passed
    ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
    ('a', 'address', '', _('address to listen on (default: all interfaces)'),
     _('ADDR')),
    ('', 'prefix', '', _('prefix path to serve from (default: server root)'),
     _('PREFIX')),
    ('n', 'name', '',
     _('name to show in web pages (default: working directory)'), _('NAME')),
    ('', 'web-conf', '',
     _("name of the hgweb config file (see 'hg help hgweb')"), _('FILE')),
    ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
     _('FILE')),
    ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
    ('', 'stdio', None, _('for remote clients (ADVANCED)')),
    ('', 'cmdserver', '', _('for remote clients (ADVANCED)'), _('MODE')),
    ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
    ('', 'style', '', _('template style to use'), _('STYLE')),
    ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
    ('', 'certificate', '', _('SSL certificate file'), _('FILE')),
    ('', 'print-url', None, _('start and print only the URL'))]
    + subrepoopts,
    _('[OPTION]...'),
    optionalrepo=True)
def serve(ui, repo, **opts):
    """start stand-alone webserver

    Start a local HTTP repository browser and pull server. You can use
    this for ad-hoc sharing and browsing of repositories. It is
    recommended to use a real web server to serve a repository for
    longer periods of time.

    Please note that the server does not implement access control.
    This means that, by default, anybody can read from the server and
    nobody can write to it by default. Set the ``web.allow-push``
    option to ``*`` to allow everybody to push to the server. You
    should use a real web server if you need to authenticate users.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A/--accesslog and -E/--errorlog options to log to
    files.

    To have the server choose a free port number to listen on, specify
    a port number of 0; in this case, the server will print the port
    number it uses.

    Returns 0 on success.
    """

    opts = pycompat.byteskwargs(opts)
    # --stdio (ssh protocol) and --cmdserver are mutually exclusive
    if opts["stdio"] and opts["cmdserver"]:
        raise error.Abort(_("cannot use --stdio with --cmdserver"))
    if opts["print_url"] and ui.verbose:
        raise error.Abort(_("cannot use --print-url with --verbose"))

    if opts["stdio"]:
        # ssh clients run 'hg serve --stdio' inside the repository
        if repo is None:
            raise error.RepoError(_("there is no Mercurial repository here"
                                    " (.hg not found)"))
        s = wireprotoserver.sshserver(ui, repo)
        # serves the wire protocol over stdin/stdout until EOF
        s.serve_forever()

    # otherwise spin up the hgweb HTTP service (or a command server,
    # depending on opts) and block in its run loop
    service = server.createservice(ui, repo, opts)
    return server.runservice(opts, initfn=service.init, runfn=service.run)
5105 5104
# Sentinel default for status --terse: distinguishes "option not given"
# from any real terse specification the user could pass on the command line.
_NOTTERSE = 'nothing'
5107 5106
5108 5107 @command('^status|st',
5109 5108 [('A', 'all', None, _('show status of all files')),
5110 5109 ('m', 'modified', None, _('show only modified files')),
5111 5110 ('a', 'added', None, _('show only added files')),
5112 5111 ('r', 'removed', None, _('show only removed files')),
5113 5112 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
5114 5113 ('c', 'clean', None, _('show only files without changes')),
5115 5114 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
5116 5115 ('i', 'ignored', None, _('show only ignored files')),
5117 5116 ('n', 'no-status', None, _('hide status prefix')),
5118 5117 ('t', 'terse', _NOTTERSE, _('show the terse output (EXPERIMENTAL)')),
5119 5118 ('C', 'copies', None, _('show source of copied files')),
5120 5119 ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
5121 5120 ('', 'rev', [], _('show difference from revision'), _('REV')),
5122 5121 ('', 'change', '', _('list the changed files of a revision'), _('REV')),
5123 5122 ] + walkopts + subrepoopts + formatteropts,
5124 5123 _('[OPTION]... [FILE]...'),
5125 5124 inferrepo=True,
5126 5125 intents={INTENT_READONLY})
5127 5126 def status(ui, repo, *pats, **opts):
5128 5127 """show changed files in the working directory
5129 5128
5130 5129 Show status of files in the repository. If names are given, only
5131 5130 files that match are shown. Files that are clean or ignored or
5132 5131 the source of a copy/move operation, are not listed unless
5133 5132 -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
5134 5133 Unless options described with "show only ..." are given, the
5135 5134 options -mardu are used.
5136 5135
5137 5136 Option -q/--quiet hides untracked (unknown and ignored) files
5138 5137 unless explicitly requested with -u/--unknown or -i/--ignored.
5139 5138
5140 5139 .. note::
5141 5140
5142 5141 :hg:`status` may appear to disagree with diff if permissions have
5143 5142 changed or a merge has occurred. The standard diff format does
5144 5143 not report permission changes and diff only reports changes
5145 5144 relative to one merge parent.
5146 5145
5147 5146 If one revision is given, it is used as the base revision.
5148 5147 If two revisions are given, the differences between them are
5149 5148 shown. The --change option can also be used as a shortcut to list
5150 5149 the changed files of a revision from its first parent.
5151 5150
5152 5151 The codes used to show the status of files are::
5153 5152
5154 5153 M = modified
5155 5154 A = added
5156 5155 R = removed
5157 5156 C = clean
5158 5157 ! = missing (deleted by non-hg command, but still tracked)
5159 5158 ? = not tracked
5160 5159 I = ignored
5161 5160 = origin of the previous file (with --copies)
5162 5161
5163 5162 .. container:: verbose
5164 5163
5165 5164 The -t/--terse option abbreviates the output by showing only the directory
5166 5165 name if all the files in it share the same status. The option takes an
5167 5166 argument indicating the statuses to abbreviate: 'm' for 'modified', 'a'
5168 5167 for 'added', 'r' for 'removed', 'd' for 'deleted', 'u' for 'unknown', 'i'
5169 5168 for 'ignored' and 'c' for clean.
5170 5169
5171 5170 It abbreviates only those statuses which are passed. Note that clean and
5172 5171 ignored files are not displayed with '--terse ic' unless the -c/--clean
5173 5172 and -i/--ignored options are also used.
5174 5173
5175 5174 The -v/--verbose option shows information when the repository is in an
5176 5175 unfinished merge, shelve, rebase state etc. You can have this behavior
5177 5176 turned on by default by enabling the ``commands.status.verbose`` option.
5178 5177
5179 5178 You can skip displaying some of these states by setting
5180 5179 ``commands.status.skipstates`` to one or more of: 'bisect', 'graft',
5181 5180 'histedit', 'merge', 'rebase', or 'unshelve'.
5182 5181
5183 5182 Examples:
5184 5183
5185 5184 - show changes in the working directory relative to a
5186 5185 changeset::
5187 5186
5188 5187 hg status --rev 9353
5189 5188
5190 5189 - show changes in the working directory relative to the
5191 5190 current directory (see :hg:`help patterns` for more information)::
5192 5191
5193 5192 hg status re:
5194 5193
5195 5194 - show all changes including copies in an existing changeset::
5196 5195
5197 5196 hg status --copies --change 9353
5198 5197
5199 5198 - get a NUL separated list of added files, suitable for xargs::
5200 5199
5201 5200 hg status -an0
5202 5201
5203 5202 - show more information about the repository status, abbreviating
5204 5203 added, removed, modified, deleted, and untracked paths::
5205 5204
5206 5205 hg status -v -t mardu
5207 5206
5208 5207 Returns 0 on success.
5209 5208
5210 5209 """
5211 5210
5212 5211 opts = pycompat.byteskwargs(opts)
5213 5212 revs = opts.get('rev')
5214 5213 change = opts.get('change')
5215 5214 terse = opts.get('terse')
5216 5215 if terse is _NOTTERSE:
5217 5216 if revs:
5218 5217 terse = ''
5219 5218 else:
5220 5219 terse = ui.config('commands', 'status.terse')
5221 5220
5222 5221 if revs and change:
5223 5222 msg = _('cannot specify --rev and --change at the same time')
5224 5223 raise error.Abort(msg)
5225 5224 elif revs and terse:
5226 5225 msg = _('cannot use --terse with --rev')
5227 5226 raise error.Abort(msg)
5228 5227 elif change:
5229 5228 repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn')
5230 5229 ctx2 = scmutil.revsingle(repo, change, None)
5231 5230 ctx1 = ctx2.p1()
5232 5231 else:
5233 5232 repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn')
5234 5233 ctx1, ctx2 = scmutil.revpair(repo, revs)
5235 5234
5236 5235 if pats or ui.configbool('commands', 'status.relative'):
5237 5236 cwd = repo.getcwd()
5238 5237 else:
5239 5238 cwd = ''
5240 5239
5241 5240 if opts.get('print0'):
5242 5241 end = '\0'
5243 5242 else:
5244 5243 end = '\n'
5245 5244 copy = {}
5246 5245 states = 'modified added removed deleted unknown ignored clean'.split()
5247 5246 show = [k for k in states if opts.get(k)]
5248 5247 if opts.get('all'):
5249 5248 show += ui.quiet and (states[:4] + ['clean']) or states
5250 5249
5251 5250 if not show:
5252 5251 if ui.quiet:
5253 5252 show = states[:4]
5254 5253 else:
5255 5254 show = states[:5]
5256 5255
5257 5256 m = scmutil.match(ctx2, pats, opts)
5258 5257 if terse:
5259 5258 # we need to compute clean and unknown to terse
5260 5259 stat = repo.status(ctx1.node(), ctx2.node(), m,
5261 5260 'ignored' in show or 'i' in terse,
5262 5261 clean=True, unknown=True,
5263 5262 listsubrepos=opts.get('subrepos'))
5264 5263
5265 5264 stat = cmdutil.tersedir(stat, terse)
5266 5265 else:
5267 5266 stat = repo.status(ctx1.node(), ctx2.node(), m,
5268 5267 'ignored' in show, 'clean' in show,
5269 5268 'unknown' in show, opts.get('subrepos'))
5270 5269
5271 5270 changestates = zip(states, pycompat.iterbytestr('MAR!?IC'), stat)
5272 5271
5273 5272 if (opts.get('all') or opts.get('copies')
5274 5273 or ui.configbool('ui', 'statuscopies')) and not opts.get('no_status'):
5275 5274 copy = copies.pathcopies(ctx1, ctx2, m)
5276 5275
5277 5276 ui.pager('status')
5278 5277 fm = ui.formatter('status', opts)
5279 5278 fmt = '%s' + end
5280 5279 showchar = not opts.get('no_status')
5281 5280
5282 5281 for state, char, files in changestates:
5283 5282 if state in show:
5284 5283 label = 'status.' + state
5285 5284 for f in files:
5286 5285 fm.startitem()
5287 5286 fm.context(ctx=ctx2)
5288 5287 fm.data(path=f)
5289 5288 fm.condwrite(showchar, 'status', '%s ', char, label=label)
5290 5289 fm.plain(fmt % repo.pathto(f, cwd), label=label)
5291 5290 if f in copy:
5292 5291 fm.data(source=copy[f])
5293 5292 fm.plain((' %s' + end) % repo.pathto(copy[f], cwd),
5294 5293 label='status.copied')
5295 5294
5296 5295 if ((ui.verbose or ui.configbool('commands', 'status.verbose'))
5297 5296 and not ui.plain()):
5298 5297 cmdutil.morestatus(repo, fm)
5299 5298 fm.end()
5300 5299
@command('^summary|sum',
    [('', 'remote', None, _('check for push and pull'))],
    '[--remote]',
    intents={INTENT_READONLY})
def summary(ui, repo, **opts):
    """summarize working directory state

    This generates a brief summary of the working directory state,
    including parents, branch, commit status, phase and available updates.

    With the --remote option, this will check the default paths for
    incoming and outgoing changes. This can be time-consuming.

    Returns 0 on success.
    """

    opts = pycompat.byteskwargs(opts)
    ui.pager('summary')
    ctx = repo[None]
    parents = ctx.parents()
    pnode = parents[0].node()
    marks = []

    # read merge state to report unresolved files; tolerate unsupported
    # record types (e.g. written by a newer hg) with a warning
    ms = None
    try:
        ms = mergemod.mergestate.read(repo)
    except error.UnsupportedMergeRecords as e:
        s = ' '.join(e.recordtypes)
        ui.warn(
            _('warning: merge state has unsupported record types: %s\n') % s)
        unresolved = []
    else:
        unresolved = list(ms.unresolved())

    for p in parents:
        # label with log.changeset (instead of log.parent) since this
        # shows a working directory parent *changeset*:
        # i18n: column positioning for "hg summary"
        ui.write(_('parent: %d:%s ') % (p.rev(), p),
                 label=logcmdutil.changesetlabels(p))
        ui.write(' '.join(p.tags()), label='log.tag')
        if p.bookmarks():
            marks.extend(p.bookmarks())
        if p.rev() == -1:
            if not len(repo):
                ui.write(_(' (empty repository)'))
            else:
                ui.write(_(' (no revision checked out)'))
        if p.obsolete():
            ui.write(_(' (obsolete)'))
        if p.isunstable():
            instabilities = (ui.label(instability, 'trouble.%s' % instability)
                             for instability in p.instabilities())
            ui.write(' ('
                     + ', '.join(instabilities)
                     + ')')
        ui.write('\n')
        if p.description():
            ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
                      label='log.summary')

    branch = ctx.branch()
    bheads = repo.branchheads(branch)
    # i18n: column positioning for "hg summary"
    m = _('branch: %s\n') % branch
    if branch != 'default':
        ui.write(m, label='log.branch')
    else:
        # the default branch is only shown in verbose-ish modes
        ui.status(m, label='log.branch')

    if marks:
        active = repo._activebookmark
        # i18n: column positioning for "hg summary"
        ui.write(_('bookmarks:'), label='log.bookmark')
        if active is not None:
            if active in marks:
                ui.write(' *' + active, label=bookmarks.activebookmarklabel)
                marks.remove(active)
            else:
                # active bookmark does not point at a wdir parent
                ui.write(' [%s]' % active, label=bookmarks.activebookmarklabel)
        for m in marks:
            ui.write(' ' + m, label='log.bookmark')
        ui.write('\n', label='log.bookmark')

    status = repo.status(unknown=True)

    c = repo.dirstate.copies()
    copied, renamed = [], []
    for d, s in c.iteritems():
        # a copy whose source was removed in the working copy is a rename
        if s in status.removed:
            status.removed.remove(s)
            renamed.append(d)
        else:
            copied.append(d)
        if d in status.added:
            status.added.remove(d)

    subs = [s for s in ctx.substate if ctx.sub(s).dirty()]

    # (label template, file list) pairs; only non-empty lists are shown
    labels = [(ui.label(_('%d modified'), 'status.modified'), status.modified),
              (ui.label(_('%d added'), 'status.added'), status.added),
              (ui.label(_('%d removed'), 'status.removed'), status.removed),
              (ui.label(_('%d renamed'), 'status.copied'), renamed),
              (ui.label(_('%d copied'), 'status.copied'), copied),
              (ui.label(_('%d deleted'), 'status.deleted'), status.deleted),
              (ui.label(_('%d unknown'), 'status.unknown'), status.unknown),
              (ui.label(_('%d unresolved'), 'resolve.unresolved'), unresolved),
              (ui.label(_('%d subrepos'), 'status.modified'), subs)]
    t = []
    for l, s in labels:
        if s:
            t.append(l % len(s))

    t = ', '.join(t)
    cleanworkdir = False

    # append parenthesized state annotations to the commit line
    if repo.vfs.exists('graftstate'):
        t += _(' (graft in progress)')
    if repo.vfs.exists('updatestate'):
        t += _(' (interrupted update)')
    elif len(parents) > 1:
        t += _(' (merge)')
    elif branch != parents[0].branch():
        t += _(' (new branch)')
    elif (parents[0].closesbranch() and
          pnode in repo.branchheads(branch, closed=True)):
        t += _(' (head closed)')
    elif not (status.modified or status.added or status.removed or renamed or
              copied or subs):
        t += _(' (clean)')
        cleanworkdir = True
    elif pnode not in bheads:
        t += _(' (new branch head)')

    if parents:
        pendingphase = max(p.phase() for p in parents)
    else:
        pendingphase = phases.public

    if pendingphase > phases.newcommitphase(ui):
        t += ' (%s)' % phases.phasenames[pendingphase]

    if cleanworkdir:
        # i18n: column positioning for "hg summary"
        ui.status(_('commit: %s\n') % t.strip())
    else:
        # i18n: column positioning for "hg summary"
        ui.write(_('commit: %s\n') % t.strip())

    # all ancestors of branch heads - all ancestors of parent = new csets
    new = len(repo.changelog.findmissing([pctx.node() for pctx in parents],
                                         bheads))

    if new == 0:
        # i18n: column positioning for "hg summary"
        ui.status(_('update: (current)\n'))
    elif pnode not in bheads:
        # i18n: column positioning for "hg summary"
        ui.write(_('update: %d new changesets (update)\n') % new)
    else:
        # i18n: column positioning for "hg summary"
        ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
                 (new, len(bheads)))

    t = []
    draft = len(repo.revs('draft()'))
    if draft:
        t.append(_('%d draft') % draft)
    secret = len(repo.revs('secret()'))
    if secret:
        t.append(_('%d secret') % secret)

    if draft or secret:
        ui.status(_('phases: %s\n') % ', '.join(t))

    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        for trouble in ("orphan", "contentdivergent", "phasedivergent"):
            numtrouble = len(repo.revs(trouble + "()"))
            # We write all the possibilities to ease translation
            troublemsg = {
               "orphan": _("orphan: %d changesets"),
               "contentdivergent": _("content-divergent: %d changesets"),
               "phasedivergent": _("phase-divergent: %d changesets"),
            }
            if numtrouble > 0:
                ui.status(troublemsg[trouble] % numtrouble + "\n")

    cmdutil.summaryhooks(ui, repo)

    # decide whether remote comparison is needed: --remote forces it,
    # otherwise extensions may request it via summaryremotehooks
    if opts.get('remote'):
        needsincoming, needsoutgoing = True, True
    else:
        needsincoming, needsoutgoing = False, False
        for i, o in cmdutil.summaryremotehooks(ui, repo, opts, None):
            if i:
                needsincoming = True
            if o:
                needsoutgoing = True
        if not needsincoming and not needsoutgoing:
            return

    def getincoming():
        # returns (source, sbranch, peer, commoninc, incoming heads);
        # peer and friends are None when the default path is unreachable
        source, branches = hg.parseurl(ui.expandpath('default'))
        sbranch = branches[0]
        try:
            other = hg.peer(repo, {}, source)
        except error.RepoError:
            if opts.get('remote'):
                raise
            return source, sbranch, None, None, None
        revs, checkout = hg.addbranchrevs(repo, other, branches, None)
        if revs:
            revs = [other.lookup(rev) for rev in revs]
        ui.debug('comparing with %s\n' % util.hidepassword(source))
        repo.ui.pushbuffer()
        commoninc = discovery.findcommonincoming(repo, other, heads=revs)
        repo.ui.popbuffer()
        return source, sbranch, other, commoninc, commoninc[1]

    if needsincoming:
        source, sbranch, sother, commoninc, incoming = getincoming()
    else:
        source = sbranch = sother = commoninc = incoming = None

    def getoutgoing():
        # returns (dest, dbranch, peer, outgoing); reuses the incoming
        # peer and common set when source and destination coincide
        dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
        dbranch = branches[0]
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
        if source != dest:
            try:
                dother = hg.peer(repo, {}, dest)
            except error.RepoError:
                if opts.get('remote'):
                    raise
                return dest, dbranch, None, None
            ui.debug('comparing with %s\n' % util.hidepassword(dest))
        elif sother is None:
            # there is no explicit destination peer, but source one is invalid
            return dest, dbranch, None, None
        else:
            dother = sother
        if (source != dest or (sbranch is not None and sbranch != dbranch)):
            common = None
        else:
            common = commoninc
        if revs:
            revs = [repo.lookup(rev) for rev in revs]
        repo.ui.pushbuffer()
        outgoing = discovery.findcommonoutgoing(repo, dother, onlyheads=revs,
                                                commoninc=common)
        repo.ui.popbuffer()
        return dest, dbranch, dother, outgoing

    if needsoutgoing:
        dest, dbranch, dother, outgoing = getoutgoing()
    else:
        dest = dbranch = dother = outgoing = None

    if opts.get('remote'):
        t = []
        if incoming:
            t.append(_('1 or more incoming'))
        o = outgoing.missing
        if o:
            t.append(_('%d outgoing') % len(o))
        other = dother or sother
        if 'bookmarks' in other.listkeys('namespaces'):
            counts = bookmarks.summary(repo, other)
            if counts[0] > 0:
                t.append(_('%d incoming bookmarks') % counts[0])
            if counts[1] > 0:
                t.append(_('%d outgoing bookmarks') % counts[1])

        if t:
            # i18n: column positioning for "hg summary"
            ui.write(_('remote: %s\n') % (', '.join(t)))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('remote: (synced)\n'))

    cmdutil.summaryremotehooks(ui, repo, opts,
                               ((source, sbranch, sother, commoninc),
                                (dest, dbranch, dother, outgoing)))
5584 5583
@command('tag',
    [('f', 'force', None, _('force tag')),
    ('l', 'local', None, _('make the tag local')),
    ('r', 'rev', '', _('revision to tag'), _('REV')),
    ('', 'remove', None, _('remove a tag')),
    # -l/--local is already there, commitopts cannot be used
    ('e', 'edit', None, _('invoke editor on commit messages')),
    ('m', 'message', '', _('use text as commit message'), _('TEXT')),
    ] + commitopts2,
    _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc. Changing
    an existing tag is normally disallowed; use -f/--force to override.

    If no revision is given, the parent of the working directory is
    used.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed similarly
    to other project files and can be hand-edited if necessary. This
    also means that tagging creates a new commit. The file
    ".hg/localtags" is used for local tags (not shared among
    repositories).

    Tag commits are usually made at the head of a branch. If the parent
    of the working directory is not a branch head, :hg:`tag` aborts; use
    -f/--force to force the tag commit to be based on a non-head
    changeset.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Since tag names have priority over branch names during revision
    lookup, using an existing branch name as a tag name is discouraged.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    # tagging may commit, so both the working-copy and the store lock
    # are taken for the whole operation
    with repo.wlock(), repo.lock():
        rev_ = "."
        names = [t.strip() for t in (name1,) + names]
        if len(names) != len(set(names)):
            raise error.Abort(_('tag names must be unique'))
        for n in names:
            scmutil.checknewlabel(repo, n, 'tag')
            if not n:
                raise error.Abort(_('tag names cannot consist entirely of '
                                    'whitespace'))
        if opts.get('rev') and opts.get('remove'):
            raise error.Abort(_("--rev and --remove are incompatible"))
        if opts.get('rev'):
            rev_ = opts['rev']
        message = opts.get('message')
        if opts.get('remove'):
            # --remove requires every name to already exist as the same
            # kind of tag (local vs global) being removed
            if opts.get('local'):
                expectedtype = 'local'
            else:
                expectedtype = 'global'

            for n in names:
                if not repo.tagtype(n):
                    raise error.Abort(_("tag '%s' does not exist") % n)
                if repo.tagtype(n) != expectedtype:
                    if expectedtype == 'global':
                        raise error.Abort(_("tag '%s' is not a global tag") % n)
                    else:
                        raise error.Abort(_("tag '%s' is not a local tag") % n)
            # removal is recorded by re-tagging the name to the null revision
            rev_ = 'null'
            if not message:
                # we don't translate commit messages
                message = 'Removed tag %s' % ', '.join(names)
        elif not opts.get('force'):
            for n in names:
                if n in repo.tags():
                    raise error.Abort(_("tag '%s' already exists "
                                        "(use -f to force)") % n)
        if not opts.get('local'):
            # a global tag creates a commit, so enforce commit preconditions
            p1, p2 = repo.dirstate.parents()
            if p2 != nullid:
                raise error.Abort(_('uncommitted merge'))
            bheads = repo.branchheads()
            if not opts.get('force') and bheads and p1 not in bheads:
                raise error.Abort(_('working directory is not at a branch head '
                                    '(use -f to force)'))
        node = scmutil.revsingle(repo, rev_).node()

        if not message:
            # we don't translate commit messages
            message = ('Added tag %s for changeset %s' %
                       (', '.join(names), short(node)))

        date = opts.get('date')
        if date:
            date = dateutil.parsedate(date)

        if opts.get('remove'):
            editform = 'tag.remove'
        else:
            editform = 'tag.add'
        editor = cmdutil.getcommiteditor(editform=editform,
                                         **pycompat.strkwargs(opts))

        # don't allow tagging the null rev
        if (not opts.get('remove') and
            scmutil.revsingle(repo, rev_).rev() == nullrev):
            raise error.Abort(_("cannot tag null revision"))

        tagsmod.tag(repo, names, node, message, opts.get('local'),
                    opts.get('user'), date, editor=editor)
5699 5698
@command('tags', formatteropts, '', intents={INTENT_READONLY})
def tags(ui, repo, **opts):
    """list repository tags

    This lists both regular and local tags. When the -v/--verbose
    switch is used, a third column "local" is printed for local tags.
    When the -q/--quiet switch is used, only the tag name is printed.

    Returns 0 on success.
    """

    opts = pycompat.byteskwargs(opts)
    ui.pager('tags')
    fm = ui.formatter('tags', opts)
    hexfunc = fm.hexfunc
    tagtype = ""

    # walk tags newest-first; local tags get their own color label
    for tagname, node in reversed(repo.tagslist()):
        hexnode = hexfunc(node)
        if repo.tagtype(tagname) == 'local':
            label = 'tags.local'
            tagtype = 'local'
        else:
            label = 'tags.normal'
            tagtype = ''

        fm.startitem()
        fm.context(repo=repo)
        fm.write('tag', '%s', tagname, label=label)
        # pad the name column to 30 display cells before rev:node
        padfmt = " " * (30 - encoding.colwidth(tagname)) + ' %5d:%s'
        fm.condwrite(not ui.quiet, 'rev node', padfmt,
                     repo.changelog.rev(node), hexnode, label=label)
        fm.condwrite(ui.verbose and tagtype, 'type', ' %s',
                     tagtype, label=label)
        fm.plain('\n')
    fm.end()
5735 5734
@command('tip',
    [('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ] + templateopts,
    _('[-p] [-g]'))
def tip(ui, repo, **opts):
    """show the tip revision (DEPRECATED)

    The tip revision (usually just called the tip) is the changeset
    most recently added to the repository (and therefore the most
    recently changed head).

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.

    This command is deprecated, please use :hg:`heads` instead.

    Returns 0 on success.
    """
    byteopts = pycompat.byteskwargs(opts)
    # render the single "tip" changeset with the standard log displayer
    displayer = logcmdutil.changesetdisplayer(ui, repo, byteopts)
    tipctx = repo['tip']
    displayer.show(tipctx)
    displayer.close()
5761 5760
@command('unbundle',
    [('u', 'update', None,
     _('update to new branch head if changesets were unbundled'))],
    _('[-u] FILE...'))
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more bundle files

    Apply one or more bundle files generated by :hg:`bundle`.

    Returns 0 on success, 1 if an update has unresolved files.
    """
    fnames = (fname1,) + fnames

    with repo.lock():
        for fname in fnames:
            f = hg.openpath(ui, fname)
            gen = exchange.readbundle(ui, f, fname)
            if isinstance(gen, streamclone.streamcloneapplier):
                # stream clone bundles bypass the changegroup machinery
                # and cannot be applied through this command
                raise error.Abort(
                    _('packed bundles cannot be applied with '
                      '"hg unbundle"'),
                    hint=_('use "hg debugapplystreamclonebundle"'))
            url = 'bundle:' + fname
            try:
                # bundle2 names its own transaction parts, so only embed
                # the (password-stripped) url for legacy bundles
                txnname = 'unbundle'
                if not isinstance(gen, bundle2.unbundle20):
                    txnname = 'unbundle\n%s' % util.hidepassword(url)
                with repo.transaction(txnname) as tr:
                    op = bundle2.applybundle(repo, gen, tr, source='unbundle',
                                             url=url)
            except error.BundleUnknownFeatureError as exc:
                raise error.Abort(
                    _('%s: unknown bundle feature, %s') % (fname, exc),
                    hint=_("see https://mercurial-scm.org/"
                           "wiki/BundleFeature for more "
                           "information"))
            modheads = bundle2.combinechangegroupresults(op)

    return postincoming(ui, repo, modheads, opts.get(r'update'), None, None)
5801 5800
@command('^update|up|checkout|co',
    [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
    ('c', 'check', None, _('require clean working directory')),
    ('m', 'merge', None, _('merge uncommitted changes')),
    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
    ('r', 'rev', '', _('revision'), _('REV'))
    ] + mergetoolopts,
    _('[-C|-c|-m] [-d DATE] [[-r] REV]'))
def update(ui, repo, node=None, **opts):
    """update working directory (or switch revisions)

    Update the repository's working directory to the specified
    changeset. If no changeset is specified, update to the tip of the
    current named branch and move the active bookmark (see :hg:`help
    bookmarks`).

    Update sets the working directory's parent revision to the specified
    changeset (see :hg:`help parents`).

    If the changeset is not a descendant or ancestor of the working
    directory's parent and there are uncommitted changes, the update is
    aborted. With the -c/--check option, the working directory is checked
    for uncommitted changes; if none are found, the working directory is
    updated to the specified changeset.

    .. container:: verbose

      The -C/--clean, -c/--check, and -m/--merge options control what
      happens if the working directory contains uncommitted changes.
      At most of one of them can be specified.

      1. If no option is specified, and if
         the requested changeset is an ancestor or descendant of
         the working directory's parent, the uncommitted changes
         are merged into the requested changeset and the merged
         result is left uncommitted. If the requested changeset is
         not an ancestor or descendant (that is, it is on another
         branch), the update is aborted and the uncommitted changes
         are preserved.

      2. With the -m/--merge option, the update is allowed even if the
         requested changeset is not an ancestor or descendant of
         the working directory's parent.

      3. With the -c/--check option, the update is aborted and the
         uncommitted changes are preserved.

      4. With the -C/--clean option, uncommitted changes are discarded and
         the working directory is updated to the requested changeset.

    To cancel an uncommitted merge (and lose your changes), use
    :hg:`merge --abort`.

    Use null as the changeset to remove the working directory (like
    :hg:`clone -U`).

    If you want to revert just one file to an older revision, use
    :hg:`revert [-r REV] NAME`.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if there are unresolved files.
    """
    rev = opts.get(r'rev')
    date = opts.get(r'date')
    clean = opts.get(r'clean')
    check = opts.get(r'check')
    merge = opts.get(r'merge')
    # a positional revision and --rev cannot both be given
    if rev and node:
        raise error.Abort(_("please specify just one revision"))

    if ui.configbool('commands', 'update.requiredest'):
        if not node and not rev and not date:
            raise error.Abort(_('you must specify a destination'),
                              hint=_('for example: hg update ".::"'))

    if rev is None or rev == '':
        rev = node

    if date and rev is not None:
        raise error.Abort(_("you can't specify a revision and a date"))

    # --clean, --check and --merge are mutually exclusive
    if len([x for x in (clean, check, merge) if x]) > 1:
        raise error.Abort(_("can only specify one of -C/--clean, -c/--check, "
                            "or -m/--merge"))

    updatecheck = None
    if check:
        updatecheck = 'abort'
    elif merge:
        updatecheck = 'none'

    with repo.wlock():
        cmdutil.clearunfinished(repo)

        if date:
            rev = cmdutil.finddate(ui, repo, date)

        # if we defined a bookmark, we have to remember the original name
        brev = rev
        if rev:
            repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
        ctx = scmutil.revsingle(repo, rev, rev)
        rev = ctx.rev()
        # remember hiddenness before the update, which may unhide it
        hidden = ctx.hidden()
        overrides = {('ui', 'forcemerge'): opts.get(r'tool', '')}
        with ui.configoverride(overrides, 'update'):
            ret = hg.updatetotally(ui, repo, rev, brev, clean=clean,
                                   updatecheck=updatecheck)
        if hidden:
            ctxstr = ctx.hex()[:12]
            ui.warn(_("updated to hidden changeset %s\n") % ctxstr)

            if ctx.obsolete():
                # explain why the changeset was hidden/obsoleted
                obsfatemsg = obsutil._getfilteredreason(repo, ctxstr, ctx)
                ui.warn("(%s)\n" % obsfatemsg)
        return ret
5919 5918
@command('verify', [])
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.

    Please see https://mercurial-scm.org/wiki/RepositoryCorruption
    for more information about recovery from corruption of the
    repository.

    Returns 0 on success, 1 if errors are encountered.
    """
    # all of the heavy lifting lives in hg.verify(); its return value is
    # used directly as the command's exit status
    return hg.verify(repo)
5938 5937
@command('version', [] + formatteropts, norepo=True,
    intents={INTENT_READONLY})
def version_(ui, **opts):
    """output version and copyright information"""
    byteopts = pycompat.byteskwargs(opts)
    if ui.verbose:
        ui.pager('version')
    fm = ui.formatter("version", byteopts)
    fm.startitem()
    fm.write("ver", _("Mercurial Distributed SCM (version %s)\n"),
             util.version())
    licensetext = _(
        "(see https://mercurial-scm.org for more information)\n"
        "\nCopyright (C) 2005-2018 Matt Mackall and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    )
    if not ui.quiet:
        fm.plain(licensetext)

    if ui.verbose:
        fm.plain(_("\nEnabled extensions:\n\n"))
    # collect (name, version, bundled?) rows so names can be columnized
    rows = []
    for extname, extmod in extensions.extensions():
        rows.append((extname,
                     extensions.moduleversion(extmod) or None,
                     extensions.ismoduleinternal(extmod)))
    fn = fm.nested("extensions", tmpl='{name}\n')
    if rows:
        namefmt = " %%-%ds " % max(len(row[0]) for row in rows)
        places = [_("external"), _("internal")]
        for extname, extver, bundled in rows:
            fn.startitem()
            fn.condwrite(ui.verbose, "name", namefmt, extname)
            if ui.verbose:
                fn.plain("%s " % places[bundled])
            fn.data(bundled=bundled)
            fn.condwrite(ui.verbose and extver, "ver", "%s", extver)
            if ui.verbose:
                fn.plain("\n")
    fn.end()
    fm.end()
5985 5984
def loadcmdtable(ui, name, cmdtable):
    """Load command functions from specified cmdtable
    """
    # warn when the extension shadows commands that are already registered
    clashing = []
    for cmd in cmdtable:
        if cmd in table:
            clashing.append(cmd)
    if clashing:
        ui.warn(_("extension '%s' overrides commands: %s\n")
                % (name, " ".join(clashing)))
    table.update(cmdtable)
@@ -1,3016 +1,3022
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
# Convenience aliases for names that live in other modules.
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
87 87
class _basefilecache(scmutil.filecache):
    """Filecache descriptor that always operates on the unfiltered repo.

    All filecache usage on a repository is done for logic that should be
    unfiltered, so get/set/delete are redirected to ``repo.unfiltered()``.
    """
    def __get__(self, repo, type=None):
        # Class-level access: return the descriptor itself.
        if repo is None:
            return self
        unfi = repo.unfiltered()
        return super(_basefilecache, self).__get__(unfi, type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())
99 99
class repofilecache(_basefilecache):
    """Filecache variant for files in .hg but outside of .hg/store."""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        # Register each path as vfs-relative ('plain' location).
        _cachedfiles.update((p, 'plain') for p in paths)

    def join(self, obj, fname):
        # Resolve against the repo's main (.hg) vfs.
        return obj.vfs.join(fname)
109 109
class storecache(_basefilecache):
    """Filecache variant for files that live in the store."""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        # Register each path as svfs-relative ('' location).
        _cachedfiles.update((p, '') for p in paths)

    def join(self, obj, fname):
        # Resolve through the repo's store join helper.
        return obj.sjoin(fname)
119 119
def isfilecached(repo, name):
    """Check whether a repo has already cached the "name" filecache-ed
    property.

    Returns an (cachedobj-or-None, iscached) tuple.
    """
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
129 129
class unfilteredpropertycache(util.propertycache):
    """propertycache that is computed and cached on the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # Accessed through a filtered view: delegate to the value
            # cached (or about to be cached) on the unfiltered repo.
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
138 138
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Store the computed value directly in the instance dict of the
        # (possibly filtered) view, bypassing any __setattr__ override.
        object.__setattr__(obj, self.name, value)
144 144
145 145
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
149 149
def unfilteredmethod(orig):
    """Decorator forcing a method to always run on the unfiltered repo."""
    def inner(repo, *args, **kwargs):
        # Swap the (possibly filtered) receiver for its unfiltered view.
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
155 155
# Capability names advertised by modern local peers (see localpeer).
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
# Legacy peers (locallegacypeer) additionally advertise the deprecated
# 'changegroupsubset' capability.
legacycaps = moderncaps.union({'changegroupsubset'})
159 159
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    """Command executor that calls methods on a local peer directly.

    Commands execute synchronously: callcommand() invokes the peer method
    immediately and returns an already-resolved future.
    """
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        # Enforce the executor life cycle: no new commands once
        # sendcommands() or close() has been called.
        if self._sent:
            raise error.ProgrammingError(
                'callcommand() cannot be used after sendcommands()')
        if self._closed:
            raise error.ProgrammingError(
                'callcommand() cannot be used after close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        method = getattr(self._peer, pycompat.sysstr(command))

        fut = pycompat.futures.Future()
        try:
            res = method(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(fut, sys.exc_info()[1:])
        else:
            fut.set_result(res)
        return fut

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
202 202
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # Operate on the 'served' repo view (presumably so peer consumers
        # only see changesets a server would expose — confirm in repoview).
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        # A local peer exposes its backing repository directly.
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        # Pushing to a local repository is always possible.
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        """Return an unbundler over the requested changegroup/bundle2 data."""
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        # Stream clones only make sense over the wire; refuse locally.
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # Surface push races as a wire-style response error.
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
330 330
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """localpeer subclass that also implements the legacy wire commands.

    Used for tests exercising peers with restricted capability sets.
    """

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # baselegacywirecommands interface:

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        # Bundle everything reachable from the repo heads on top of
        # the given roots.
        repo = self._repo
        outgoing = discovery.outgoing(repo, missingroots=nodes,
                                      missingheads=repo.heads())
        return changegroup.makechangegroup(repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        repo = self._repo
        outgoing = discovery.outgoing(repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(repo, outgoing, '01', source)
358 358
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
380 380
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.

    Raises ``error.RepoError`` if ``path`` does not contain a repository or
    its requirements cannot be satisfied.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            # Re-stat so a real I/O problem (e.g. permission error) is
            # raised as-is rather than masked as "repository not found".
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        # Missing/unreadable hgrc is fine. Note extensions.loadall() only
        # runs when the hgrc was read successfully.
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    # Feature flags accumulated while resolving storage; passed to the
    # factory functions and the final repo constructor.
    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produces types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)
579 579
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Requirements that, when present, cause the named extensions to be
    # enabled automatically.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement in sorted(autoextensions):
        if requirement in requirements:
            for extname in autoextensions[requirement]:
                # Respect an explicit user setting for the extension.
                if not ui.hasconfig(b'extensions', extname):
                    ui.setconfig(b'extensions', extname, b'',
                                 source='autoload')
604 604
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Everything this file knows how to open...
    supported = set(localrepository._basesupported)

    # ...plus whatever registered featuresetupfuncs from currently loaded
    # extensions contribute.
    loadedmodules = {m.__name__ for n, m in extensions.extensions(ui)}
    for fn in featuresetupfuncs:
        if fn.__module__ in loadedmodules:
            fn(ui, supported)

    # Compression engines that define a revlog header imply a derived
    # requirement.
    for name in util.compengines:
        if util.compengines[name].revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported
625 625
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    if the set contains any requirement the currently loaded code does
    not recognize, or a requirement entry that looks corrupt (empty or
    not starting with an alphanumeric character). Returns ``None`` on
    success.
    """
    missing = set(requirements) - set(supported)

    # A malformed entry means .hg/requires itself is damaged; report that
    # rather than a generic "unknown requirement" error.
    for requirement in missing:
        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))
652 652
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' not in requirements:
        return
    if not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extensions to access'))
670 670
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # Pre-"store" layouts get the basic store.
    if b'store' not in requirements:
        return storemod.basicstore(path, vfstype)

    if b'fncache' in requirements:
        dotencode = b'dotencode' in requirements
        return storemod.fncachestore(path, vfstype, dotencode)

    return storemod.encodedstore(path, vfstype)
681 681
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    cachesize = ui.configint(b'format', b'manifestcachesize')
    if cachesize is not None:
        options[b'manifestcachesize'] = cachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    usesrevlogs = (b'revlogv1' in requirements or
                   REVLOGV2_REQUIREMENT in requirements)
    if usesrevlogs:
        options.update(resolverevlogstorevfsoptions(ui, requirements,
                                                    features))

    return options
706 706
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs.

    Returns a dict of option names to values derived from the repository
    requirements and various config options.
    """

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    # A negative maxdeltachainspan leaves the option unset (no limit).
    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        # sparse-revlog implies generaldelta even if the requirement
        # is not recorded separately.
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options
770 770
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``.

    The keyword arguments supplied by ``makelocalrepository()`` are
    ignored; the stock ``localrepository`` type is always returned.
    """
    return localrepository
774 774
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Obtain the filelog for a tracked file.

        A single leading ``/`` in ``path`` is tolerated and stripped.
        """
        # Compare a one-byte slice instead of indexing: on Python 3,
        # path[0] yields an int that never equals b'/', which silently
        # disabled the stripping (and indexing raises IndexError on an
        # empty path).
        if path[0:1] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)
784 784
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Obtain the narrow filelog for a tracked file.

        A single leading ``/`` in ``path`` is tolerated and stripped.
        """
        # Compare a one-byte slice instead of indexing: on Python 3,
        # path[0] yields an int that never equals b'/', which silently
        # disabled the stripping (and indexing raises IndexError on an
        # empty path).
        if path[0:1] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
794 794
def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    # Narrow repos need the matcher-aware storage variant.
    narrowed = repository.NARROW_REQUIREMENT in requirements
    return revlognarrowfilestorage if narrowed else revlogfilestorage
804 804
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped: the lambda re-resolves the module-level name
# at call time, so extension wrappers installed later are honored.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
814 814
815 815 @interfaceutil.implementer(repository.ilocalrepositorymain)
816 816 class localrepository(object):
817 817 """Main class for representing local repositories.
818 818
819 819 All local repositories are instances of this class.
820 820
821 821 Constructed on its own, instances of this class are not usable as
822 822 repository objects. To obtain a usable repository object, call
823 823 ``hg.repository()``, ``localrepo.instance()``, or
824 824 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
825 825 ``instance()`` adds support for creating new repositories.
826 826 ``hg.repository()`` adds more extension integration, including calling
827 827 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
828 828 used.
829 829 """
830 830
    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    # for stem compression of long paths. Experiment ended up not
    # being successful (repository sizes went up due to worse delta
    # chains), and the code was deleted in 4.6.

    # Requirements that affect the on-disk storage format.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    # All requirements this class knows how to open, storage or otherwise.
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefix for file which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }
872 872
    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening
           requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        # None means the unfiltered (raw) view; repoview subclasses override.
        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []

        color.setup(self.ui)

        # Convenience aliases for the store's path/vfs/join.
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        # Warn at most once about an unknown dirstate parent.
        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        # Weak references to the current transaction/lock/wlock (if any).
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}
996 996
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        Wraps the vfs audit function so that writes to ``.hg/`` made without
        holding the appropriate lock emit a ``devel`` warning. Only active
        when devel.all-warnings or devel.check-locks is set (see __init__).
        """
        # weakref so the ward does not keep the repo alive via the vfs.
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # Bail out if the repo is gone or not yet fully constructed.
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            # Read-only access never requires a lock.
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
1031 1031
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        Like ``_getvfsward`` but for the store vfs, where all writes are
        covered by the store 'lock' (there is no wlock distinction here).
        """
        # weakref so the ward does not keep the repo alive via the vfs.
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            # Read-only access never requires a lock.
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs
1050 1050
    def close(self):
        """Flush pending cache writes; does not invalidate the object."""
        self._writecaches()

    def _writecaches(self):
        # Only the rev-branch cache needs an explicit flush at close time.
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        """Filter/extend peer capabilities advertised for this repo."""
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            # URL-quote the blob since capabilities are space-separated.
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        """Persist ``self.requirements`` to ``.hg/requires``."""
        scmutil.writerequires(self.vfs, self.requirements)
1068 1068
    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        # realfs=False: do not consult the filesystem, since historical
        # contexts need not match the on-disk layout.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)
1084 1084
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        ``path`` is an absolute path; returns True only when it falls under
        a subrepository recorded in the working directory's ``.hgsub`` state
        (recursing into nested subrepos as needed).
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # path is inside a subrepo; delegate the check to it
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter ancestor prefix
                parts.pop()
        return False
1122 1122
    def peer(self):
        """Return a peer interface for this (local) repository."""
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        # Derive a repoview subclass of the (unfiltered) concrete repo type.
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
1136 1136
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # Invalidated when either bookmarks file changes on disk.
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        # Name of the active bookmark, or None.
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        # trypending: honor an in-progress transaction's pending changelog.
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore)
1165 1165
    @repofilecache('dirstate')
    def dirstate(self):
        # Cached against the on-disk dirstate file.
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        # Lazy so the sparse matcher is only computed when the dirstate
        # actually needs it.
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        """Map an unknown dirstate parent to nullid, warning once."""
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
1187 1187
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        # Non-narrow repos match everything; avoids loading the narrowspec.
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)
1202 1202
    def narrowmatch(self, match=None):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.
        """
        # NOTE(review): a falsy `match` is treated like None here — confirm
        # matcher objects are always truthy before relying on this.
        if match:
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
1206 1212
    def setnarrowpats(self, newincludes, newexcludes):
        """Persist a new narrowspec and drop caches derived from the old one."""
        narrowspec.save(self, newincludes, newexcludes)
        # clearfilecache=True so narrowpats/_narrowmatch get recomputed
        self.invalidate(clearfilecache=True)
1210 1216
    def __getitem__(self, changeid):
        """Return a context for ``changeid``.

        ``changeid`` may be None (working directory), an existing context
        (returned as-is), a slice of revision numbers, an integer revision,
        the symbols 'null'/'tip'/'.', a 20-byte binary node, or a 40-byte
        hex node. Raises error.RepoLookupError (or a Filtered variant) for
        unknown or filtered revisions.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(_("unknown revision '%s'") % changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
1272 1278
1273 1279 def __contains__(self, changeid):
1274 1280 """True if the given changeid exists
1275 1281
1276 1282 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1277 1283 specified.
1278 1284 """
1279 1285 try:
1280 1286 self[changeid]
1281 1287 return True
1282 1288 except error.RepoLookupError:
1283 1289 return False
1284 1290
    def __nonzero__(self):
        # A repository object is always truthy, even when empty.
        return True

    __bool__ = __nonzero__

    def __len__(self):
        """Number of changesets in the unfiltered repository."""
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        """Iterate over changelog revision numbers."""
        return iter(self.changelog)
1297 1303
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
1342 1348
    def url(self):
        """URL of this repository (always a local ``file:`` URL)."""
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
1354 1360
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived caches, filled lazily by tagslist()/nodetags().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # the cache may hold tags computed on an unfiltered view;
            # recompute rather than trust it
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
1393 1399
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        # local tags override globals with the same name
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
1439 1445
1440 1446 def tagslist(self):
1441 1447 '''return a list of tags ordered by revision'''
1442 1448 if not self._tagscache.tagslist:
1443 1449 l = []
1444 1450 for t, n in self.tags().iteritems():
1445 1451 l.append((self.changelog.rev(n), t, n))
1446 1452 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1447 1453
1448 1454 return self._tagscache.tagslist
1449 1455
1450 1456 def nodetags(self, node):
1451 1457 '''return the tags associated with a node'''
1452 1458 if not self._tagscache.nodetagscache:
1453 1459 nodetagscache = {}
1454 1460 for t, n in self._tagscache.tags.iteritems():
1455 1461 nodetagscache.setdefault(n, []).append(t)
1456 1462 for tags in nodetagscache.itervalues():
1457 1463 tags.sort()
1458 1464 self._tagscache.nodetagscache = nodetagscache
1459 1465 return self._tagscache.nodetagscache.get(node, [])
1460 1466
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # updatecache populates self._branchcaches for the current filter
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        """Return (creating if needed) the per-rev branch name cache."""
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
1476 1482
    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        """Resolve a revision symbol to its binary node."""
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        """Return the branch name for ``key`` (a branch name or revsymbol)."""
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()
1501 1507
1502 1508 def known(self, nodes):
1503 1509 cl = self.changelog
1504 1510 nm = cl.nodemap
1505 1511 filtered = cl.filteredrevs
1506 1512 result = []
1507 1513 for n in nodes:
1508 1514 r = nm.get(n)
1509 1515 resp = not (r is None or r in filtered)
1510 1516 result.append(resp)
1511 1517 return result
1512 1518
    def local(self):
        # Local repos return themselves; remote peers return a falsy value.
        return self

    def publishing(self):
        """True if this repo marks pushed changesets as public."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        """True if this repository can be cloned by copying files."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None
1535 1541
    def wjoin(self, f, *insidef):
        """Join path components onto the working directory root."""
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents, preserving relevant copy records."""
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # drop copy records whose source and dest are both unknown
                # to the (single) parent
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)
1560 1566
    def getcwd(self):
        """Current working directory, repo-relative (delegates to dirstate)."""
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        """Return ``f`` expressed relative to ``cwd`` (default: getcwd())."""
        return self.dirstate.pathto(f, cwd)
1566 1572
1567 1573 def _loadfilter(self, filter):
1568 1574 if filter not in self._filterpats:
1569 1575 l = []
1570 1576 for pat, cmd in self.ui.configitems(filter):
1571 1577 if cmd == '!':
1572 1578 continue
1573 1579 mf = matchmod.match(self.root, '', [pat])
1574 1580 fn = None
1575 1581 params = cmd
1576 1582 for name, filterfn in self._datafilters.iteritems():
1577 1583 if cmd.startswith(name):
1578 1584 fn = filterfn
1579 1585 params = cmd[len(name):].lstrip()
1580 1586 break
1581 1587 if not fn:
1582 1588 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1583 1589 # Wrap old filters not supporting keyword arguments
1584 1590 if not pycompat.getargspec(fn)[2]:
1585 1591 oldfn = fn
1586 1592 fn = lambda s, c, **kwargs: oldfn(s, c)
1587 1593 l.append((mf, fn, params))
1588 1594 self._filterpats[filter] = l
1589 1595 return self._filterpats[filter]
1590 1596
1591 1597 def _filter(self, filterpats, filename, data):
1592 1598 for mf, fn, cmd in filterpats:
1593 1599 if mf(filename):
1594 1600 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1595 1601 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1596 1602 break
1597 1603
1598 1604 return data
1599 1605
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        """Register a named data filter usable in encode/decode config."""
        self._datafilters[name] = filter
1610 1616
1611 1617 def wread(self, filename):
1612 1618 if self.wvfs.islink(filename):
1613 1619 data = self.wvfs.readlink(filename)
1614 1620 else:
1615 1621 data = self.wvfs.read(filename)
1616 1622 return self._filter(self._encodefilterpats, filename, data)
1617 1623
    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        ``flags`` may contain 'l' (symlink) and/or 'x' (executable).
        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # symlink: data is the link target
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                # explicitly clear the exec bit as well
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        """Return ``data`` decode-filtered as if written to ``filename``."""
        return self._filter(self._decodefilterpats, filename, data)
1637 1643
1638 1644 def currenttransaction(self):
1639 1645 """return the current transaction or None if non exists"""
1640 1646 if self._transref:
1641 1647 tr = self._transref()
1642 1648 else:
1643 1649 tr = None
1644 1650
1645 1651 if tr and tr.running():
1646 1652 return tr
1647 1653 return None
1648 1654
def transaction(self, desc, report=None):
    """Open a new transaction named ``desc`` and return it.

    If a transaction is already running, a nested transaction is
    returned instead.  ``report`` optionally overrides the function used
    to print transaction messages (defaults to ``ui.warn``).

    Raises ``ProgrammingError`` (under devel checks) when no lock is
    held, and ``RepoError`` when an abandoned journal is found on disk.
    """
    if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is None:
            raise error.ProgrammingError('transaction requires locking')
    tr = self.currenttransaction()
    if tr is not None:
        return tr.nest(name=desc)

    # abort here if the journal already exists
    if self.svfs.exists("journal"):
        raise error.RepoError(
            _("abandoned transaction found"),
            hint=_("run 'hg recover' to clean up transaction"))

    # build a unique-ish transaction id from randomness and wall time
    idbase = "%.40f#%f" % (random.random(), time.time())
    ha = hex(hashlib.sha1(idbase).digest())
    txnid = 'TXN:' + ha
    self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

    self._writejournal(desc)
    renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
    if report:
        rp = report
    else:
        rp = self.ui.warn
    vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
    # we must avoid cyclic reference between repo and transaction.
    reporef = weakref.ref(self)
    # Code to track tag movement
    #
    # Since tags are all handled as file content, it is actually quite hard
    # to track these movement from a code perspective. So we fallback to a
    # tracking at the repository level. One could envision to track changes
    # to the '.hgtags' file through changegroup apply but that fails to
    # cope with case where transaction expose new heads without changegroup
    # being involved (eg: phase movement).
    #
    # For now, We gate the feature behind a flag since this likely comes
    # with performance impacts. The current code run more often than needed
    # and do not use caches as much as it could. The current focus is on
    # the behavior of the feature so we disable it by default. The flag
    # will be removed when we are happy with the performance impact.
    #
    # Once this feature is no longer experimental move the following
    # documentation to the appropriate help section:
    #
    # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
    # tags (new or changed or deleted tags). In addition the details of
    # these changes are made available in a file at:
    #     ``REPOROOT/.hg/changes/tags.changes``.
    # Make sure you check for HG_TAG_MOVED before reading that file as it
    # might exist from a previous transaction even if no tag were touched
    # in this one. Changes are recorded in a line base format::
    #
    #   <action> <hex-node> <tag-name>\n
    #
    # Actions are defined as follow:
    #   "-R": tag is removed,
    #   "+A": tag is added,
    #   "-M": tag is moved (old value),
    #   "+M": tag is moved (new value),
    # default no-op; replaced by the real tracker below when enabled
    tracktags = lambda x: None
    # experimental config: experimental.hook-track-tags
    shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
    if desc != 'strip' and shouldtracktags:
        oldheads = self.changelog.headrevs()
        def tracktags(tr2):
            # diff tag filenodes between the pre-transaction heads and
            # the current ones; record any movement for hooks
            repo = reporef()
            oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
            newheads = repo.changelog.headrevs()
            newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
            # notes: we compare lists here.
            # As we do it only once building a set would not be cheaper
            changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
            if changes:
                tr2.hookargs['tag_moved'] = '1'
                with repo.vfs('changes/tags.changes', 'w',
                              atomictemp=True) as changesfile:
                    # note: we do not register the file to the transaction
                    # because we need it to still exist when the
                    # transaction is closed (for txnclose hooks)
                    tagsmod.writediff(changesfile, changes)
    def validate(tr2):
        """will run pre-closing hooks"""
        # XXX the transaction API is a bit lacking here so we take a hacky
        # path for now
        #
        # We cannot add this as a "pending" hooks since the 'tr.hookargs'
        # dict is copied before these run. In addition we needs the data
        # available to in memory hooks too.
        #
        # Moreover, we also need to make sure this runs before txnclose
        # hooks and there is no "pending" mechanism that would execute
        # logic only if hooks are about to run.
        #
        # Fixing this limitation of the transaction is also needed to track
        # other families of changes (bookmarks, phases, obsolescence).
        #
        # This will have to be fixed before we remove the experimental
        # gating.
        tracktags(tr2)
        repo = reporef()
        if repo.ui.configbool('experimental', 'single-head-per-branch'):
            scmutil.enforcesinglehead(repo, tr2, desc)
        if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
            # note: 'tr' is the outer transaction, bound later at the
            # transaction.transaction(...) call below (late binding)
            for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                args = tr.hookargs.copy()
                args.update(bookmarks.preparehookargs(name, old, new))
                repo.hook('pretxnclose-bookmark', throw=True,
                          txnname=desc,
                          **pycompat.strkwargs(args))
        if hook.hashook(repo.ui, 'pretxnclose-phase'):
            cl = repo.unfiltered().changelog
            for rev, (old, new) in tr.changes['phases'].items():
                args = tr.hookargs.copy()
                node = hex(cl.node(rev))
                args.update(phases.preparehookargs(node, old, new))
                repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                          **pycompat.strkwargs(args))

        repo.hook('pretxnclose', throw=True,
                  txnname=desc, **pycompat.strkwargs(tr.hookargs))
    def releasefn(tr, success):
        repo = reporef()
        if success:
            # this should be explicitly invoked here, because
            # in-memory changes aren't written out at closing
            # transaction, if tr.addfilegenerator (via
            # dirstate.write or so) isn't invoked while
            # transaction running
            repo.dirstate.write(None)
        else:
            # discard all changes (including ones already written
            # out) in this transaction
            # NOTE(review): this uses the closed-over 'self' while the
            # next line uses 'repo' from the weakref — confirm the
            # inconsistency is intended given the cyclic-reference
            # concern documented above.
            narrowspec.restorebackup(self, 'journal.narrowspec')
            repo.dirstate.restorebackup(None, 'journal.dirstate')

            repo.invalidate(clearfilecache=True)

    tr = transaction.transaction(rp, self.svfs, vfsmap,
                                 "journal",
                                 "undo",
                                 aftertrans(renames),
                                 self.store.createmode,
                                 validator=validate,
                                 releasefn=releasefn,
                                 checkambigfiles=_cachedfiles,
                                 name=desc)
    tr.changes['origrepolen'] = len(self)
    tr.changes['obsmarkers'] = set()
    tr.changes['phases'] = {}
    tr.changes['bookmarks'] = {}

    tr.hookargs['txnid'] = txnid
    # note: writing the fncache only during finalize mean that the file is
    # outdated when running hooks. As fncache is used for streaming clone,
    # this is not expected to break anything that happen during the hooks.
    tr.addfinalize('flush-fncache', self.store.write)
    def txnclosehook(tr2):
        """To be run if transaction is successful, will schedule a hook run
        """
        # Don't reference tr2 in hook() so we don't hold a reference.
        # This reduces memory consumption when there are multiple
        # transactions per lock. This can likely go away if issue5045
        # fixes the function accumulation.
        hookargs = tr2.hookargs

        def hookfunc():
            repo = reporef()
            if hook.hashook(repo.ui, 'txnclose-bookmark'):
                bmchanges = sorted(tr.changes['bookmarks'].items())
                for name, (old, new) in bmchanges:
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('txnclose-bookmark', throw=False,
                              txnname=desc, **pycompat.strkwargs(args))

            if hook.hashook(repo.ui, 'txnclose-phase'):
                cl = repo.unfiltered().changelog
                phasemv = sorted(tr.changes['phases'].items())
                for rev, (old, new) in phasemv:
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('txnclose-phase', throw=False, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('txnclose', throw=False, txnname=desc,
                      **pycompat.strkwargs(hookargs))
        # run the hooks only once the outermost lock is released
        reporef()._afterlock(hookfunc)
    tr.addfinalize('txnclose-hook', txnclosehook)
    # Include a leading "-" to make it happen before the transaction summary
    # reports registered via scmutil.registersummarycallback() whose names
    # are 00-txnreport etc. That way, the caches will be warm when the
    # callbacks run.
    tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
    def txnaborthook(tr2):
        """To be run if transaction is aborted
        """
        reporef().hook('txnabort', throw=False, txnname=desc,
                       **pycompat.strkwargs(tr2.hookargs))
    tr.addabort('txnabort-hook', txnaborthook)
    # avoid eager cache invalidation. in-memory data should be identical
    # to stored data if transaction has no error.
    tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
    self._transref = weakref.ref(tr)
    scmutil.registersummarycallback(self, tr, desc)
    return tr
1858 1864
1859 1865 def _journalfiles(self):
1860 1866 return ((self.svfs, 'journal'),
1861 1867 (self.vfs, 'journal.dirstate'),
1862 1868 (self.vfs, 'journal.branch'),
1863 1869 (self.vfs, 'journal.desc'),
1864 1870 (self.vfs, 'journal.bookmarks'),
1865 1871 (self.svfs, 'journal.phaseroots'))
1866 1872
def undofiles(self):
    """Return (vfs, name) pairs for the undo counterparts of the
    transaction journal files."""
    files = []
    for vfs, name in self._journalfiles():
        files.append((vfs, undoname(name)))
    return files
1869 1875
@unfilteredmethod
def _writejournal(self, desc):
    """Snapshot the non-store state (dirstate, narrowspec, branch,
    transaction description, bookmarks and phaseroots) into journal.*
    backup files so an interrupted transaction can be rolled back."""
    self.dirstate.savebackup(None, 'journal.dirstate')
    narrowspec.savebackup(self, 'journal.narrowspec')
    branchdata = encoding.fromlocal(self.dirstate.branch())
    self.vfs.write("journal.branch", branchdata)
    descdata = "%d\n%s\n" % (len(self), desc)
    self.vfs.write("journal.desc", descdata)
    self.vfs.write("journal.bookmarks", self.vfs.tryread("bookmarks"))
    self.svfs.write("journal.phaseroots", self.svfs.tryread("phaseroots"))
1882 1888
def recover(self):
    """Roll back an interrupted transaction, if one is on disk.

    Returns True when a journal was found and rolled back, False
    otherwise."""
    with self.lock():
        if not self.svfs.exists("journal"):
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
        self.ui.status(_("rolling back interrupted transaction\n"))
        vfsmap = {
            '': self.svfs,
            'plain': self.vfs,
        }
        transaction.rollback(self.svfs, vfsmap, "journal",
                             self.ui.warn,
                             checkambigfiles=_cachedfiles)
        self.invalidate()
        return True
1897 1903
def rollback(self, dryrun=False, force=False):
    """Undo the last transaction recorded in the undo files.

    Returns 0 on success (delegated to _rollback) and 1 when there is
    no rollback information available."""
    wlock = lock = dsguard = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if not self.svfs.exists("undo"):
            self.ui.warn(_("no rollback information available\n"))
            return 1
        # guard the dirstate so a failed rollback cannot leave it
        # half-written
        dsguard = dirstateguard.dirstateguard(self, 'rollback')
        return self._rollback(dryrun, force, dsguard)
    finally:
        release(dsguard, lock, wlock)
1912 1918
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force, dsguard):
    """Do the actual work of rollback(): restore the undo.* backups.

    dryrun: only report what would be rolled back.
    force: skip the safety check against rolling back a commit while
    not checked out.
    dsguard: a dirstateguard protecting the dirstate; closed here when
    the working directory parents were stripped by the rollback.
    Returns 0.
    """
    ui = self.ui
    try:
        # undo.desc holds "<old repo length>\n<txn desc>[\n<detail>]"
        args = self.vfs.read('undo.desc').splitlines()
        (oldlen, desc, detail) = (int(args[0]), args[1], None)
        if len(args) >= 3:
            detail = args[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %d'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %d'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        # no (readable) undo.desc: old or foreign undo data
        msg = _('rolling back unknown transaction\n')
        desc = None

    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise error.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.vfs, '': self.svfs}
    transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                         checkambigfiles=_cachedfiles)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
    self.invalidate()

    # did the rollback strip the current working directory parents?
    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        # prevent dirstateguard from overwriting already restored one
        dsguard.close()

        narrowspec.restorebackup(self, 'undo.narrowspec')
        self.dirstate.restorebackup(None, 'undo.dirstate')
        try:
            branch = self.vfs.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        parents = tuple([p.rev() for p in self[None].parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # any in-progress merge state is now meaningless; drop it
        mergemod.mergestate.clean(self, self['.'].node())

    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1985 1991
1986 1992 def _buildcacheupdater(self, newtransaction):
1987 1993 """called during transaction to build the callback updating cache
1988 1994
1989 1995 Lives on the repository to help extension who might want to augment
1990 1996 this logic. For this purpose, the created transaction is passed to the
1991 1997 method.
1992 1998 """
1993 1999 # we must avoid cyclic reference between repo and transaction.
1994 2000 reporef = weakref.ref(self)
1995 2001 def updater(tr):
1996 2002 repo = reporef()
1997 2003 repo.updatecaches(tr)
1998 2004 return updater
1999 2005
@unfilteredmethod
def updatecaches(self, tr=None, full=False):
    """Warm appropriate caches.

    When called after a transaction closed, that transaction is passed
    as *tr* so cache updates can be restricted to what it changed.

    When *full* is set, also bring the more lazily-populated caches
    (rev-branch cache, manifest full-text cache) fully up to date.
    """
    if tr is not None and tr.hookargs.get('source') == 'strip':
        # During a strip many caches become invalid, but the later call
        # to `destroyed` refreshes them, so there is nothing to do here.
        return

    if tr is None or tr.changes['origrepolen'] < len(self):
        # updating the unfiltered branchmap should refresh all the others,
        self.ui.debug('updating the branch cache\n')
        branchmap.updatecache(self.filtered('served'))

    if not full:
        return

    rbc = self.revbranchcache()
    for rev in self.changelog:
        rbc.branchinfo(rev)
    rbc.write()

    # ensure the working copy parents are in the manifestfulltextcache
    for ctx in self['.'].parents():
        ctx.manifest() # accessing the manifest is enough
2030 2036
def invalidatecaches(self):
    """Drop the in-memory caches (tags, branch heads, volatile sets,
    sparse signatures) so they are recomputed on next access."""
    # can't use delattr on proxy, so operate on __dict__ directly
    self.__dict__.pop('_tagscache', None)

    self.unfiltered()._branchcaches.clear()
    self.invalidatevolatilesets()
    self._sparsesignaturecache.clear()
2040 2046
def invalidatevolatilesets(self):
    """Forget caches built on top of volatile state: the per-filter
    revision caches and the obsolescence-marker caches."""
    self.filteredrevcache.clear()
    obsolete.clearobscaches(self)
2044 2050
def invalidatedirstate(self):
    '''Invalidates the dirstate, causing the next call to dirstate
    to check if it was modified since the last time it was read,
    rereading it if it has.

    This is different to dirstate.invalidate() in that it doesn't
    always reread the dirstate. Use dirstate.invalidate() if you want
    to explicitly read the dirstate again (i.e. restoring it to a
    previous known good state).'''
    if not hasunfilteredcache(self, 'dirstate'):
        return
    # drop the per-file caches held on the dirstate object itself
    for name in self.dirstate._filecache:
        try:
            delattr(self.dirstate, name)
        except AttributeError:
            pass
    # then drop the cached dirstate from the (unfiltered) repo
    delattr(self.unfiltered(), 'dirstate')
2061 2067
def invalidate(self, clearfilecache=False):
    '''Invalidates both store and non-store parts other than dirstate

    If a transaction is running, invalidation of store is omitted,
    because discarding in-memory changes might cause inconsistency
    (e.g. incomplete fncache causes unintentional failure, but
    redundant one doesn't).
    '''
    unfiltered = self.unfiltered() # all file caches are stored unfiltered
    for name in list(self._filecache.keys()):
        if name == 'dirstate':
            # dirstate is invalidated separately in invalidatedirstate()
            continue
        if (name == 'changelog'
            and self.currenttransaction()
            and self.changelog._delayed):
            # The changelog object may store unwritten revisions. We don't
            # want to lose them.
            # TODO: Solve the problem instead of working around it.
            continue

        if clearfilecache:
            del self._filecache[name]
        try:
            delattr(unfiltered, name)
        except AttributeError:
            pass
    self.invalidatecaches()
    if not self.currenttransaction():
        # TODO: Changing contents of store outside transaction
        # causes inconsistency. We should make in-memory store
        # changes detectable, and abort if changed.
        self.store.invalidatecaches()
2095 2101
def invalidateall(self):
    """Invalidate every cached part of the repository, store and
    non-store alike, so the next operation rereads outside changes."""
    # extensions are expected to wrap this to drop their own caches
    self.invalidate()
    self.invalidatedirstate()
2102 2108
@unfilteredmethod
def _refreshfilecachestats(self, tr):
    """Reload stats of cached files so that they are flagged as valid"""
    for name, entry in self._filecache.items():
        name = pycompat.sysstr(name)
        # skip dirstate (handled separately) and never-loaded entries
        if name == r'dirstate' or name not in self.__dict__:
            continue
        entry.refresh()
2111 2117
def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
          inheritchecker=None, parentenvvar=None):
    """Acquire *lockname* on *vfs* and return the lock object.

    When *wait* is true the configured ui timeouts apply; otherwise
    the attempt fails immediately if the lock is held elsewhere.
    """
    # the contents of parentenvvar are used by the underlying lock to
    # determine whether it can be inherited
    if parentenvvar is not None:
        parentlock = encoding.environ.get(parentenvvar)
    else:
        parentlock = None

    if wait:
        timeout = self.ui.configint("ui", "timeout")
        warntimeout = self.ui.configint("ui", "timeout.warn")
    else:
        timeout = warntimeout = 0
    # internal config: ui.signal-safe-lock
    signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

    return lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                           releasefn=releasefn,
                           acquirefn=acquirefn, desc=desc,
                           inheritchecker=inheritchecker,
                           parentlock=parentlock,
                           signalsafe=signalsafe)
2135 2141
2136 2142 def _afterlock(self, callback):
2137 2143 """add a callback to be run when the repository is fully unlocked
2138 2144
2139 2145 The callback will be executed when the outermost lock is released
2140 2146 (with wlock being higher level than 'lock')."""
2141 2147 for ref in (self._wlockref, self._lockref):
2142 2148 l = ref and ref()
2143 2149 if l and l.held:
2144 2150 l.postrelease.append(callback)
2145 2151 break
2146 2152 else: # no lock have been found.
2147 2153 callback()
2148 2154
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.

    If both 'lock' and 'wlock' must be acquired, always acquire 'wlock'
    first to avoid a dead-lock hazard.'''
    existing = self._currentlock(self._lockref)
    if existing is not None:
        # re-enter the lock we already hold
        existing.lock()
        return existing

    newlock = self._lock(self.svfs, "lock", wait, None,
                         self.invalidate,
                         _('repository %s') % self.origroot)
    self._lockref = weakref.ref(newlock)
    return newlock
2165 2171
2166 2172 def _wlockchecktransaction(self):
2167 2173 if self.currenttransaction() is not None:
2168 2174 raise error.LockInheritanceContractViolation(
2169 2175 'wlock cannot be inherited in the middle of a transaction')
2170 2176
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.

    Use this before modifying files in .hg.

    If both 'lock' and 'wlock' must be acquired, always acquire 'wlock'
    first to avoid a dead-lock hazard.'''
    existing = self._wlockref() if self._wlockref else None
    if existing is not None and existing.held:
        # re-enter the wlock we already hold
        existing.lock()
        return existing

    # We do not need to check for non-waiting lock acquisition. Such
    # acquisition would not cause dead-lock as they would just fail.
    if wait and (self.ui.configbool('devel', 'all-warnings')
                 or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is not None:
            self.ui.develwarn('"wlock" acquired after "lock"')

    def unlock():
        # flush (or drop) pending dirstate changes on release
        if self.dirstate.pendingparentchange():
            self.dirstate.invalidate()
        else:
            self.dirstate.write(None)

        self._filecache['dirstate'].refresh()

    newlock = self._lock(self.vfs, "wlock", wait, unlock,
                         self.invalidatedirstate,
                         _('working directory of %s') % self.origroot,
                         inheritchecker=self._wlockchecktransaction,
                         parentenvvar='HG_WLOCK_LOCKER')
    self._wlockref = weakref.ref(newlock)
    return newlock
2206 2212
2207 2213 def _currentlock(self, lockref):
2208 2214 """Returns the lock if it's held, or None if it's not."""
2209 2215 if lockref is None:
2210 2216 return None
2211 2217 l = lockref()
2212 2218 if l is None or not l.held:
2213 2219 return None
2214 2220 return l
2215 2221
def currentwlock(self):
    """Return the wlock when it is currently held, or None."""
    return self._currentlock(self._wlockref)
2219 2225
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx: the file context supplying the new content
    manifest1, manifest2: manifests of the commit's parents
    linkrev: revision number the new filelog entry will link to
    tr: the running transaction
    changelist: list to append the filename to when the file changed

    Returns the filelog node for the file (possibly a reused parent
    node when the content is unchanged).
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    if isinstance(fctx, context.filectx):
        # the context already points at an existing filelog node; reuse
        # it when it matches one of the parents
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            self.ui.debug('reusing %s filelog entry\n' % fname)
            if manifest1.flags(fname) != fctx.flags():
                changelist.append(fname)
            return node

    flog = self.file(fname)
    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent1 == nullid:
        # file is new on p1's side: record p2's node as the sole parent
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid

    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
2307 2313
2308 2314 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2309 2315 """check for commit arguments that aren't committable"""
2310 2316 if match.isexact() or match.prefix():
2311 2317 matched = set(status.modified + status.added + status.removed)
2312 2318
2313 2319 for f in match.files():
2314 2320 f = self.dirstate.normalize(f)
2315 2321 if f == '.' or f in matched or f in wctx.substate:
2316 2322 continue
2317 2323 if f in status.deleted:
2318 2324 fail(f, _('file not found!'))
2319 2325 if f in vdirs: # visited directory
2320 2326 d = f + '/'
2321 2327 for mf in matched:
2322 2328 if mf.startswith(d):
2323 2329 break
2324 2330 else:
2325 2331 fail(f, _("no match under directory!"))
2326 2332 elif f not in self.dirstate:
2327 2333 fail(f, _("file not tracked!"))
2328 2334
@unfilteredmethod
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra=None):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the node of the new changeset, or None when there was
    nothing to commit (and empty commits are not allowed).
    """
    if extra is None:
        extra = {}

    def fail(f, msg):
        raise error.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        vdirs = []
        match.explicitdir = vdirs.append
        match.bad = fail

    wlock = lock = tr = None
    try:
        wlock = self.wlock()
        lock = self.lock() # for recent changelog (see issue4368)

        wctx = self[None]
        merge = len(wctx.parents()) > 1

        if not force and merge and not match.always():
            raise error.Abort(_('cannot partially commit a merge '
                                '(do not specify files or patterns)'))

        status = self.status(match=match, clean=force)
        if force:
            status.modified.extend(status.clean) # mq may commit clean files

        # check subrepos
        subs, commitsubs, newstate = subrepoutil.precommit(
            self.ui, wctx, status, match, force=force)

        # make sure all explicit patterns are matched
        if not force:
            self.checkcommitpatterns(wctx, vdirs, match, status, fail)

        cctx = context.workingcommitctx(self, status,
                                        text, user, date, extra)

        # internal config: ui.allowemptycommit
        allowemptycommit = (wctx.branch() != wctx.p1().branch()
                            or extra.get('close') or merge or cctx.files()
                            or self.ui.configbool('ui', 'allowemptycommit'))
        if not allowemptycommit:
            return None

        if merge and cctx.deleted():
            raise error.Abort(_("cannot commit merge with missing files"))

        ms = mergemod.mergestate.read(self)
        mergeutil.checkunresolved(ms)

        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook).  Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfn = self.savecommitmessage(cctx._text)

        # commit subs and write new state
        if subs:
            for s in sorted(commitsubs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                               subrepoutil.subrelpath(sub))
                sr = sub.commit(cctx._text, user, date)
                newstate[s] = (newstate[s][0], sr)
            subrepoutil.writestate(self, newstate)

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1,
                      parent2=hookp2)
            tr = self.transaction('commit')
            ret = self.commitctx(cctx, True)
        except: # re-raises
            # tell the user where the message went before propagating
            # whatever aborted the commit (including KeyboardInterrupt,
            # hence the bare except)
            if edited:
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise
        # update bookmarks, dirstate and mergestate
        bookmarks.update(self, [p1, p2], ret)
        cctx.markcommitted(ret)
        ms.reset()
        tr.close()

    finally:
        lockmod.release(tr, lock, wlock)

    # default arguments capture the values now, before the working
    # directory moves on
    def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
        # hack for command that use a temporary commit (eg: histedit)
        # temporary commit got stripped before hook release
        if self.changelog.hasnode(ret):
            self.hook("commit", node=node, parent1=parent1,
                      parent2=parent2)
    self._afterlock(commithook)
    return ret
2440 2446
2441 2447 @unfilteredmethod
2442 2448 def commitctx(self, ctx, error=False):
2443 2449 """Add a new revision to current repository.
2444 2450 Revision information is passed via the context argument.
2445 2451
2446 2452 ctx.files() should list all files involved in this commit, i.e.
2447 2453 modified/added/removed files. On merge, it may be wider than the
2448 2454 ctx.files() to be committed, since any file nodes derived directly
2449 2455 from p1 or p2 are excluded from the committed ctx.files().
2450 2456 """
2451 2457
2452 2458 tr = None
2453 2459 p1, p2 = ctx.p1(), ctx.p2()
2454 2460 user = ctx.user()
2455 2461
2456 2462 lock = self.lock()
2457 2463 try:
2458 2464 tr = self.transaction("commit")
2459 2465 trp = weakref.proxy(tr)
2460 2466
2461 2467 if ctx.manifestnode():
2462 2468 # reuse an existing manifest revision
2463 2469 self.ui.debug('reusing known manifest\n')
2464 2470 mn = ctx.manifestnode()
2465 2471 files = ctx.files()
2466 2472 elif ctx.files():
2467 2473 m1ctx = p1.manifestctx()
2468 2474 m2ctx = p2.manifestctx()
2469 2475 mctx = m1ctx.copy()
2470 2476
2471 2477 m = mctx.read()
2472 2478 m1 = m1ctx.read()
2473 2479 m2 = m2ctx.read()
2474 2480
2475 2481 # check in files
2476 2482 added = []
2477 2483 changed = []
2478 2484 removed = list(ctx.removed())
2479 2485 linkrev = len(self)
2480 2486 self.ui.note(_("committing files:\n"))
2481 2487 for f in sorted(ctx.modified() + ctx.added()):
2482 2488 self.ui.note(f + "\n")
2483 2489 try:
2484 2490 fctx = ctx[f]
2485 2491 if fctx is None:
2486 2492 removed.append(f)
2487 2493 else:
2488 2494 added.append(f)
2489 2495 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2490 2496 trp, changed)
2491 2497 m.setflag(f, fctx.flags())
2492 2498 except OSError as inst:
2493 2499 self.ui.warn(_("trouble committing %s!\n") % f)
2494 2500 raise
2495 2501 except IOError as inst:
2496 2502 errcode = getattr(inst, 'errno', errno.ENOENT)
2497 2503 if error or errcode and errcode != errno.ENOENT:
2498 2504 self.ui.warn(_("trouble committing %s!\n") % f)
2499 2505 raise
2500 2506
2501 2507 # update manifest
2502 2508 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2503 2509 drop = [f for f in removed if f in m]
2504 2510 for f in drop:
2505 2511 del m[f]
2506 2512 files = changed + removed
2507 2513 md = None
2508 2514 if not files:
2509 2515 # if no "files" actually changed in terms of the changelog,
2510 2516 # try hard to detect unmodified manifest entry so that the
2511 2517 # exact same commit can be reproduced later on convert.
2512 2518 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2513 2519 if not files and md:
2514 2520 self.ui.debug('not reusing manifest (no file change in '
2515 2521 'changelog, but manifest differs)\n')
2516 2522 if files or md:
2517 2523 self.ui.note(_("committing manifest\n"))
2518 2524 # we're using narrowmatch here since it's already applied at
2519 2525 # other stages (such as dirstate.walk), so we're already
2520 2526 # ignoring things outside of narrowspec in most cases. The
2521 2527 # one case where we might have files outside the narrowspec
2522 2528 # at this point is merges, and we already error out in the
2523 2529 # case where the merge has files outside of the narrowspec,
2524 2530 # so this is safe.
2525 2531 mn = mctx.write(trp, linkrev,
2526 2532 p1.manifestnode(), p2.manifestnode(),
2527 2533 added, drop, match=self.narrowmatch())
2528 2534 else:
2529 2535 self.ui.debug('reusing manifest form p1 (listed files '
2530 2536 'actually unchanged)\n')
2531 2537 mn = p1.manifestnode()
2532 2538 else:
2533 2539 self.ui.debug('reusing manifest from p1 (no file change)\n')
2534 2540 mn = p1.manifestnode()
2535 2541 files = []
2536 2542
2537 2543 # update changelog
2538 2544 self.ui.note(_("committing changelog\n"))
2539 2545 self.changelog.delayupdate(tr)
2540 2546 n = self.changelog.add(mn, files, ctx.description(),
2541 2547 trp, p1.node(), p2.node(),
2542 2548 user, ctx.date(), ctx.extra().copy())
2543 2549 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2544 2550 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2545 2551 parent2=xp2)
2546 2552 # set the new commit is proper phase
2547 2553 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2548 2554 if targetphase:
2549 2555 # retract boundary do not alter parent changeset.
2550 2556 # if a parent have higher the resulting phase will
2551 2557 # be compliant anyway
2552 2558 #
2553 2559 # if minimal phase was 0 we don't need to retract anything
2554 2560 phases.registernew(self, tr, targetphase, [n])
2555 2561 tr.close()
2556 2562 return n
2557 2563 finally:
2558 2564 if tr:
2559 2565 tr.release()
2560 2566 lock.release()
2561 2567
2562 2568 @unfilteredmethod
2563 2569 def destroying(self):
2564 2570 '''Inform the repository that nodes are about to be destroyed.
2565 2571 Intended for use by strip and rollback, so there's a common
2566 2572 place for anything that has to be done before destroying history.
2567 2573
2568 2574 This is mostly useful for saving state that is in memory and waiting
2569 2575 to be flushed when the current lock is released. Because a call to
2570 2576 destroyed is imminent, the repo will be invalidated causing those
2571 2577 changes to stay in memory (waiting for the next unlock), or vanish
2572 2578 completely.
2573 2579 '''
2574 2580 # When using the same lock to commit and strip, the phasecache is left
2575 2581 # dirty after committing. Then when we strip, the repo is invalidated,
2576 2582 # causing those changes to disappear.
2577 2583 if '_phasecache' in vars(self):
2578 2584 self._phasecache.write()
2579 2585
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        # NOTE: invalidate() discards in-memory caches so they reload
        # from disk on next access, after the strip has taken effect.
        self.invalidate()
2611 2617
2612 2618 def status(self, node1='.', node2=None, match=None,
2613 2619 ignored=False, clean=False, unknown=False,
2614 2620 listsubrepos=False):
2615 2621 '''a convenience method that calls node1.status(node2)'''
2616 2622 return self[node1].status(node2, match, ignored, clean, unknown,
2617 2623 listsubrepos)
2618 2624
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        ``ps`` is the callback. On status completion, callback(wctx, status)
        will be called with the wlock held, unless the dirstate has changed
        from underneath or the wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure it adds to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
2640 2646
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks.

        Returns the (mutable) list registered via addpostdsstatus()."""
        return self._postdsstatus
2644 2650
    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks.

        Empties the list in place so references held elsewhere stay valid."""
        del self._postdsstatus[:]
2648 2654
2649 2655 def heads(self, start=None):
2650 2656 if start is None:
2651 2657 cl = self.changelog
2652 2658 headrevs = reversed(cl.headrevs())
2653 2659 return [cl.node(rev) for rev in headrevs]
2654 2660
2655 2661 heads = self.changelog.heads(start)
2656 2662 # sort the output in rev descending order
2657 2663 return sorted(heads, key=self.changelog.rev, reverse=True)
2658 2664
2659 2665 def branchheads(self, branch=None, start=None, closed=False):
2660 2666 '''return a (possibly filtered) list of heads for the given branch
2661 2667
2662 2668 Heads are returned in topological order, from newest to oldest.
2663 2669 If branch is None, use the dirstate branch.
2664 2670 If start is not None, return only heads reachable from start.
2665 2671 If closed is True, return heads that are marked as closed as well.
2666 2672 '''
2667 2673 if branch is None:
2668 2674 branch = self[None].branch()
2669 2675 branches = self.branchmap()
2670 2676 if branch not in branches:
2671 2677 return []
2672 2678 # the cache returns heads ordered lowest to highest
2673 2679 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2674 2680 if start is not None:
2675 2681 # filter out the heads that cannot be reached from startrev
2676 2682 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2677 2683 bheads = [h for h in bheads if h in fbheads]
2678 2684 return bheads
2679 2685
    def branches(self, nodes):
        # For each requested node, follow first parents until hitting a
        # merge (two real parents) or a root, and report the segment as a
        # 4-tuple (tip-of-segment, stop-node, parent1, parent2).
        if not nodes:
            # default to the changelog tip when no nodes are given
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n  # remember where this linear segment started
            while True:
                p = self.changelog.parents(n)
                # stop at a merge (p[1] set) or at a root (p[0] is null)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
2693 2699
    def between(self, pairs):
        # For each (top, bottom) pair, walk first parents from top towards
        # bottom, sampling nodes at exponentially growing step counts
        # (steps 1, 2, 4, 8, ... along the walk) and return one list of
        # sampled nodes per pair.
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1  # step count at which the next sample is taken

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2  # double the gap after each sample
                n = p
                i += 1

            r.append(l)

        return r
2712 2718
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.

        The default implementation does nothing.
        """
2718 2724
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        # NOTE(review): the unfilteredpropertycache decorator presumably
        # memoizes this container on the unfiltered repo -- confirm.
        return util.hooks()
2725 2731
    def pushkey(self, namespace, key, old, new):
        """Update ``key`` from ``old`` to ``new`` in ``namespace``.

        Runs the 'prepushkey' hook first (with any current transaction's
        hook args merged in); if it aborts, a message is printed and False
        is returned. Otherwise the pushkey is performed and the 'pushkey'
        hook is scheduled via self._afterlock(). Returns the result of
        pushkey.push().
        """
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                # forward transaction hook args to the prepushkey hook
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            # throw=True: a failing prepushkey hook raises HookAbort
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            # the post-push hook only fires once the lock is released
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
2750 2756
    def listkeys(self, namespace):
        """Return the pushkey key/value map for ``namespace``.

        Runs the 'prelistkeys' hook before listing (throw=True, so a
        failing hook raises) and the 'listkeys' hook afterwards with the
        resulting values.
        """
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
2757 2763
2758 2764 def debugwireargs(self, one, two, three=None, four=None, five=None):
2759 2765 '''used to test argument passing over the wire'''
2760 2766 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2761 2767 pycompat.bytestr(four),
2762 2768 pycompat.bytestr(five))
2763 2769
2764 2770 def savecommitmessage(self, text):
2765 2771 fp = self.vfs('last-message.txt', 'wb')
2766 2772 try:
2767 2773 fp.write(text)
2768 2774 finally:
2769 2775 fp.close()
2770 2776 return self.pathto(fp.name[len(self.root) + 1:])
2771 2777
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs journal renames after a transaction.

    ``files`` is an iterable of (vfs, src, dest) triples; the returned
    function renames each src to dest on its vfs, removing dest first.
    """
    entries = [tuple(t) for t in files]

    def finalize():
        for filevfs, src, dst in entries:
            # if src and dst name the same file, vfs.rename is a no-op and
            # would leave both names on disk; remove dst first so the
            # rename can never silently no-op.
            filevfs.tryunlink(dst)
            try:
                filevfs.rename(src, dst)
            except OSError:
                # the journal file does not exist yet
                pass

    return finalize
2786 2792
def undoname(fn):
    """Map a journal file path to the corresponding undo file path.

    The basename must start with 'journal'; that prefix is swapped for
    'undo' while the directory part is preserved.
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, 'undo' + name[len('journal'):])
2791 2797
def instance(ui, path, create, intents=None, createopts=None):
    """Return a local repository object for ``path``.

    If ``create`` is true, the on-disk repository is created first using
    ``createopts``. ``intents`` is forwarded to makelocalrepository().
    """
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)
2798 2804
def islocal(path):
    """Return True unconditionally: this module only handles local paths."""
    return True
2801 2807
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated. The input dict is not mutated;
    a (shallow) copy is returned.
    """
    opts = {} if createopts is None else dict(createopts)

    if 'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts['backend'] = ui.config('storage', 'new-repo-backend')

    return opts
2815 2821
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.

    ``createopts`` is a creation-options mapping as produced by
    defaultcreateopts(). Returns a set of requirement strings.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    # fncache requires store; dotencode requires fncache -- hence the nesting
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements
2883 2889
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    recognized = frozenset((
        'backend',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    ))

    return {key: value for key, value in createopts.items()
            if key not in recognized}
2907 2913
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    # refuse to create a repository with options no loaded code understands
    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
2992 2998
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Swap in a type whose attribute lookups all fail. close() stays
    # callable because some repo constructors call close() on repo
    # references they hold.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item != r'close':
                raise error.ProgrammingError('repo instances should not be used '
                                             'after unshare')

            return object.__getattribute__(self, item)

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
General Comments 0
You need to be logged in to leave comments. Login now