bookmarks: extract function that looks up bookmark names by node
Yuya Nishihara
r37867:6e225984 default
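The change extracts the reverse lookup (node -> bookmark names) into a new bmstore.names() method, so callers no longer iterate the name-to-node map themselves. A minimal, self-contained sketch of the extracted logic; 'refmap' here is a plain dict standing in for bmstore._refmap:

    # Sketch of the lookup that bmstore.names() performs below; bmstore
    # keeps a {name: node} mapping, so a reverse lookup is a linear scan.
    def names(refmap, node):
        return sorted(m for m, n in refmap.items() if n == node)

    refmap = {b'feature': b'\x11' * 20, b'mirror': b'\x11' * 20,
              b'stable': b'\x22' * 20}
    assert names(refmap, b'\x11' * 20) == [b'feature', b'mirror']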
@@ -1,937 +1,945
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import struct
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 short,
18 18 wdirid,
19 19 )
20 20 from . import (
21 21 encoding,
22 22 error,
23 23 obsutil,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 # label constants
31 31 # until 3.5, bookmarks.current was the advertised name, not
32 32 # bookmarks.active, so we must use both to avoid breaking old
33 33 # custom styles
34 34 activebookmarklabel = 'bookmarks.active bookmarks.current'
35 35
36 36 def _getbkfile(repo):
37 37 """Hook so that extensions that mess with the store can hook bm storage.
38 38
39 39 For core, this just handles whether we should see pending
40 40 bookmarks or the committed ones. Other extensions (like share)
41 41 may need to tweak this behavior further.
42 42 """
43 43 fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
44 44 return fp
45 45
46 46 class bmstore(object):
47 47 """Storage for bookmarks.
48 48
49 49 This object should do all bookmark-related reads and writes, so
50 50 that it's fairly simple to replace the storage underlying
51 51 bookmarks without having to clone the logic surrounding
52 52 bookmarks. This type also should manage the active bookmark, if
53 53 any.
54 54
55 55 This particular bmstore implementation stores bookmarks as
56 56 {hash}\s{name}\n (the same format as localtags) in
57 57 .hg/bookmarks. The mapping is stored as {name: nodeid}.
58 58 """
59 59
60 60 def __init__(self, repo):
61 61 self._repo = repo
62 62 self._refmap = refmap = {} # refspec: node
63 63 self._clean = True
64 64 self._aclean = True
65 65 nm = repo.changelog.nodemap
66 66 tonode = bin # force local lookup
67 67 try:
68 68 with _getbkfile(repo) as bkfile:
69 69 for line in bkfile:
70 70 line = line.strip()
71 71 if not line:
72 72 continue
73 73 try:
74 74 sha, refspec = line.split(' ', 1)
75 75 node = tonode(sha)
76 76 if node in nm:
77 77 refspec = encoding.tolocal(refspec)
78 78 refmap[refspec] = node
79 79 except (TypeError, ValueError):
80 80 # TypeError:
81 81 # - bin(...)
82 82 # ValueError:
83 83 # - node in nm, for non-20-byte entries
84 84 # - split(...), for string without ' '
85 85 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
86 86 % pycompat.bytestr(line))
87 87 except IOError as inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90 self._active = _readactive(repo, self)
91 91
92 92 @property
93 93 def active(self):
94 94 return self._active
95 95
96 96 @active.setter
97 97 def active(self, mark):
98 98 if mark is not None and mark not in self._refmap:
99 99 raise AssertionError('bookmark %s does not exist!' % mark)
100 100
101 101 self._active = mark
102 102 self._aclean = False
103 103
104 104 def __len__(self):
105 105 return len(self._refmap)
106 106
107 107 def __iter__(self):
108 108 return iter(self._refmap)
109 109
110 110 def iteritems(self):
111 111 return self._refmap.iteritems()
112 112
113 113 def items(self):
114 114 return self._refmap.items()
115 115
116 116 # TODO: maybe rename to allnames()?
117 117 def keys(self):
118 118 return self._refmap.keys()
119 119
120 120 # TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
121 121 def values(self):
122 122 return self._refmap.values()
123 123
124 124 def __contains__(self, mark):
125 125 return mark in self._refmap
126 126
127 127 def __getitem__(self, mark):
128 128 return self._refmap[mark]
129 129
130 130 def get(self, mark, default=None):
131 131 return self._refmap.get(mark, default)
132 132
133 133 def _set(self, key, value):
134 134 self._clean = False
135 135 self._refmap[key] = value
136 136
137 137 def _del(self, key):
138 138 self._clean = False
139 139 del self._refmap[key]
140 140
141 def names(self, node):
142 """Return a sorted list of bookmarks pointing to the specified node"""
143 marks = []
144 for m, n in self._refmap.iteritems():
145 if n == node:
146 marks.append(m)
147 return sorted(marks)
148
141 149 def changectx(self, mark):
142 150 node = self._refmap[mark]
143 151 return self._repo[node]
144 152
145 153 def applychanges(self, repo, tr, changes):
146 154 """Apply a list of changes to bookmarks
147 155 """
148 156 bmchanges = tr.changes.get('bookmarks')
149 157 for name, node in changes:
150 158 old = self._refmap.get(name)
151 159 if node is None:
152 160 self._del(name)
153 161 else:
154 162 self._set(name, node)
155 163 if bmchanges is not None:
156 164 # if a previous value exists, preserve the "initial" value
157 165 previous = bmchanges.get(name)
158 166 if previous is not None:
159 167 old = previous[0]
160 168 bmchanges[name] = (old, node)
161 169 self._recordchange(tr)
162 170
163 171 def _recordchange(self, tr):
164 172 """record that bookmarks have been changed in a transaction
165 173
166 174 The transaction is then responsible for updating the file content."""
167 175 tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
168 176 location='plain')
169 177 tr.hookargs['bookmark_moved'] = '1'
170 178
171 179 def _writerepo(self, repo):
172 180 """Factored out for extensibility"""
173 181 rbm = repo._bookmarks
174 182 if rbm.active not in self._refmap:
175 183 rbm.active = None
176 184 rbm._writeactive()
177 185
178 186 with repo.wlock():
179 187 file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
180 188 checkambig=True)
181 189 try:
182 190 self._write(file_)
183 191 except: # re-raises
184 192 file_.discard()
185 193 raise
186 194 finally:
187 195 file_.close()
188 196
189 197 def _writeactive(self):
190 198 if self._aclean:
191 199 return
192 200 with self._repo.wlock():
193 201 if self._active is not None:
194 202 f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
195 203 checkambig=True)
196 204 try:
197 205 f.write(encoding.fromlocal(self._active))
198 206 finally:
199 207 f.close()
200 208 else:
201 209 self._repo.vfs.tryunlink('bookmarks.current')
202 210 self._aclean = True
203 211
204 212 def _write(self, fp):
205 213 for name, node in sorted(self._refmap.iteritems()):
206 214 fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
207 215 self._clean = True
208 216 self._repo.invalidatevolatilesets()
209 217
210 218 def expandname(self, bname):
211 219 if bname == '.':
212 220 if self.active:
213 221 return self.active
214 222 else:
215 223 raise error.Abort(_("no active bookmark"))
216 224 return bname
217 225
218 226 def checkconflict(self, mark, force=False, target=None):
219 227 """check repo for a potential clash of mark with an existing bookmark,
220 228 branch, or hash
221 229
222 230 If target is supplied, then check that we are moving the bookmark
223 231 forward.
224 232
225 233 If force is supplied, then forcibly move the bookmark to a new commit
226 234 regardless if it is a move forward.
227 235
228 236 If divergent bookmarks are to be deleted, they will be returned as a list.
229 237 """
230 238 cur = self._repo['.'].node()
231 239 if mark in self._refmap and not force:
232 240 if target:
233 241 if self._refmap[mark] == target and target == cur:
234 242 # re-activating a bookmark
235 243 return []
236 244 rev = self._repo[target].rev()
237 245 anc = self._repo.changelog.ancestors([rev])
238 246 bmctx = self.changectx(mark)
239 247 divs = [self._refmap[b] for b in self._refmap
240 248 if b.split('@', 1)[0] == mark.split('@', 1)[0]]
241 249
242 250 # allow resolving a single divergent bookmark even if moving
243 251 # the bookmark across branches when a revision is specified
244 252 # that contains a divergent bookmark
245 253 if bmctx.rev() not in anc and target in divs:
246 254 return divergent2delete(self._repo, [target], mark)
247 255
248 256 deletefrom = [b for b in divs
249 257 if self._repo[b].rev() in anc or b == target]
250 258 delbms = divergent2delete(self._repo, deletefrom, mark)
251 259 if validdest(self._repo, bmctx, self._repo[target]):
252 260 self._repo.ui.status(
253 261 _("moving bookmark '%s' forward from %s\n") %
254 262 (mark, short(bmctx.node())))
255 263 return delbms
256 264 raise error.Abort(_("bookmark '%s' already exists "
257 265 "(use -f to force)") % mark)
258 266 if ((mark in self._repo.branchmap() or
259 267 mark == self._repo.dirstate.branch()) and not force):
260 268 raise error.Abort(
261 269 _("a bookmark cannot have the name of an existing branch"))
262 270 if len(mark) > 3 and not force:
263 271 try:
264 272 shadowhash = scmutil.isrevsymbol(self._repo, mark)
265 273 except error.LookupError: # ambiguous identifier
266 274 shadowhash = False
267 275 if shadowhash:
268 276 self._repo.ui.warn(
269 277 _("bookmark %s matches a changeset hash\n"
270 278 "(did you leave a -r out of an 'hg bookmark' "
271 279 "command?)\n")
272 280 % mark)
273 281 return []
274 282
275 283 def _readactive(repo, marks):
276 284 """
277 285 Get the active bookmark. We can have an active bookmark that updates
278 286 itself as we commit. This function returns the name of that bookmark.
279 287 It is stored in .hg/bookmarks.current
280 288 """
281 289 mark = None
282 290 try:
283 291 file = repo.vfs('bookmarks.current')
284 292 except IOError as inst:
285 293 if inst.errno != errno.ENOENT:
286 294 raise
287 295 return None
288 296 try:
289 297 # No readline() in osutil.posixfile, reading everything is
290 298 # cheap.
291 299 # Note that it's possible for readlines() here to raise
292 300 # IOError, since we might be reading the active mark over
293 301 # static-http which only tries to load the file when we try
294 302 # to read from it.
295 303 mark = encoding.tolocal((file.readlines() or [''])[0])
296 304 if mark == '' or mark not in marks:
297 305 mark = None
298 306 except IOError as inst:
299 307 if inst.errno != errno.ENOENT:
300 308 raise
301 309 return None
302 310 finally:
303 311 file.close()
304 312 return mark
305 313
306 314 def activate(repo, mark):
307 315 """
308 316 Set the given bookmark to be 'active', meaning that this bookmark will
309 317 follow new commits that are made.
310 318 The name is recorded in .hg/bookmarks.current
311 319 """
312 320 repo._bookmarks.active = mark
313 321 repo._bookmarks._writeactive()
314 322
315 323 def deactivate(repo):
316 324 """
317 325 Unset the active bookmark in this repository.
318 326 """
319 327 repo._bookmarks.active = None
320 328 repo._bookmarks._writeactive()
321 329
322 330 def isactivewdirparent(repo):
323 331 """
324 332 Tell whether the 'active' bookmark (the one that follows new commits)
325 333 points to one of the parents of the current working directory (wdir).
326 334
327 335 While this is normally the case, it can on occasion be false; for example,
328 336 immediately after a pull, the active bookmark can be moved to point
329 337 to a place different from the wdir. This is solved by running `hg update`.
330 338 """
331 339 mark = repo._activebookmark
332 340 marks = repo._bookmarks
333 341 parents = [p.node() for p in repo[None].parents()]
334 342 return (mark in marks and marks[mark] in parents)
335 343
336 344 def divergent2delete(repo, deletefrom, bm):
337 345 """find divergent versions of bm on nodes in deletefrom.
338 346
339 347 Return the list of bookmarks to delete."""
340 348 todelete = []
341 349 marks = repo._bookmarks
342 350 divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
343 351 for mark in divergent:
344 352 if mark == '@' or '@' not in mark:
345 353 # can't be divergent by definition
346 354 continue
347 355 if mark and marks[mark] in deletefrom:
348 356 if mark != bm:
349 357 todelete.append(mark)
350 358 return todelete
351 359
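Divergence grouping hinges on the base name before the first '@': 'feature@default' and 'feature@2' are divergent variants of 'feature'. A small runnable sketch of the grouping used by divergent2delete() above (which additionally skips '@' itself and names without '@', since those cannot be divergent):

    marks = [b'feature', b'feature@default', b'feature@2', b'other']
    base = b'feature'.split(b'@', 1)[0]
    divergent = [m for m in marks if m.split(b'@', 1)[0] == base]
    assert divergent == [b'feature', b'feature@default', b'feature@2']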
352 360 def headsforactive(repo):
353 361 """Given a repo with an active bookmark, return divergent bookmark nodes.
354 362
355 363 Args:
356 364 repo: A repository with an active bookmark.
357 365
358 366 Returns:
359 367 A list of binary node ids that is the full list of other
360 368 revisions with bookmarks divergent from the active bookmark. If
361 369 there were no divergent bookmarks, then this list will contain
362 370 only one entry.
363 371 """
364 372 if not repo._activebookmark:
365 373 raise ValueError(
366 374 'headsforactive() only makes sense with an active bookmark')
367 375 name = repo._activebookmark.split('@', 1)[0]
368 376 heads = []
369 377 for mark, n in repo._bookmarks.iteritems():
370 378 if mark.split('@', 1)[0] == name:
371 379 heads.append(n)
372 380 return heads
373 381
374 382 def calculateupdate(ui, repo):
375 383 '''Return a tuple (activemark, movemarkfrom) indicating the active bookmark
376 384 and where to move the active bookmark from, if needed.'''
377 385 checkout, movemarkfrom = None, None
378 386 activemark = repo._activebookmark
379 387 if isactivewdirparent(repo):
380 388 movemarkfrom = repo['.'].node()
381 389 elif activemark:
382 390 ui.status(_("updating to active bookmark %s\n") % activemark)
383 391 checkout = activemark
384 392 return (checkout, movemarkfrom)
385 393
386 394 def update(repo, parents, node):
387 395 deletefrom = parents
388 396 marks = repo._bookmarks
389 397 active = marks.active
390 398 if not active:
391 399 return False
392 400
393 401 bmchanges = []
394 402 if marks[active] in parents:
395 403 new = repo[node]
396 404 divs = [marks.changectx(b) for b in marks
397 405 if b.split('@', 1)[0] == active.split('@', 1)[0]]
398 406 anc = repo.changelog.ancestors([new.rev()])
399 407 deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
400 408 if validdest(repo, marks.changectx(active), new):
401 409 bmchanges.append((active, new.node()))
402 410
403 411 for bm in divergent2delete(repo, deletefrom, active):
404 412 bmchanges.append((bm, None))
405 413
406 414 if bmchanges:
407 415 with repo.lock(), repo.transaction('bookmark') as tr:
408 416 marks.applychanges(repo, tr, bmchanges)
409 417 return bool(bmchanges)
410 418
411 419 def listbinbookmarks(repo):
412 420 # We may try to list bookmarks on a repo type that does not
413 421 # support it (e.g., statichttprepository).
414 422 marks = getattr(repo, '_bookmarks', {})
415 423
416 424 hasnode = repo.changelog.hasnode
417 425 for k, v in marks.iteritems():
418 426 # don't expose local divergent bookmarks
419 427 if hasnode(v) and ('@' not in k or k.endswith('@')):
420 428 yield k, v
421 429
422 430 def listbookmarks(repo):
423 431 d = {}
424 432 for book, node in listbinbookmarks(repo):
425 433 d[book] = hex(node)
426 434 return d
427 435
428 436 def pushbookmark(repo, key, old, new):
429 437 with repo.wlock(), repo.lock(), repo.transaction('bookmarks') as tr:
430 438 marks = repo._bookmarks
431 439 existing = hex(marks.get(key, ''))
432 440 if existing != old and existing != new:
433 441 return False
434 442 if new == '':
435 443 changes = [(key, None)]
436 444 else:
437 445 if new not in repo:
438 446 return False
439 447 changes = [(key, repo[new].node())]
440 448 marks.applychanges(repo, tr, changes)
441 449 return True
442 450
443 451 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
444 452 '''Compare bookmarks between srcmarks and dstmarks
445 453
446 454 This returns a tuple "(addsrc, adddst, advsrc, advdst, diverge,
447 455 differ, invalid, same)"; each element is a list of bookmarks as below:
448 456
449 457 :addsrc: added on src side (removed on dst side, perhaps)
450 458 :adddst: added on dst side (removed on src side, perhaps)
451 459 :advsrc: advanced on src side
452 460 :advdst: advanced on dst side
453 461 :diverge: diverged (neither side is a valid update of the other)
454 462 :differ: changed, but the changeset referred to on src is unknown on dst
455 463 :invalid: unknown on both sides
456 464 :same: same on both sides
457 465
458 466 Each element of the lists in the result tuple is a tuple "(bookmark
459 467 name, changeset ID on source side, changeset ID on destination
460 468 side)". Each changeset ID is a 40-digit hexadecimal string or
461 469 None.
462 470
463 471 Changeset IDs of tuples in "addsrc", "adddst", "differ" or
464 472 "invalid" list may be unknown for repo.
465 473
466 474 If "targets" is specified, only bookmarks listed in it are
467 475 examined.
468 476 '''
469 477
470 478 if targets:
471 479 bset = set(targets)
472 480 else:
473 481 srcmarkset = set(srcmarks)
474 482 dstmarkset = set(dstmarks)
475 483 bset = srcmarkset | dstmarkset
476 484
477 485 results = ([], [], [], [], [], [], [], [])
478 486 addsrc = results[0].append
479 487 adddst = results[1].append
480 488 advsrc = results[2].append
481 489 advdst = results[3].append
482 490 diverge = results[4].append
483 491 differ = results[5].append
484 492 invalid = results[6].append
485 493 same = results[7].append
486 494
487 495 for b in sorted(bset):
488 496 if b not in srcmarks:
489 497 if b in dstmarks:
490 498 adddst((b, None, dstmarks[b]))
491 499 else:
492 500 invalid((b, None, None))
493 501 elif b not in dstmarks:
494 502 addsrc((b, srcmarks[b], None))
495 503 else:
496 504 scid = srcmarks[b]
497 505 dcid = dstmarks[b]
498 506 if scid == dcid:
499 507 same((b, scid, dcid))
500 508 elif scid in repo and dcid in repo:
501 509 sctx = repo[scid]
502 510 dctx = repo[dcid]
503 511 if sctx.rev() < dctx.rev():
504 512 if validdest(repo, sctx, dctx):
505 513 advdst((b, scid, dcid))
506 514 else:
507 515 diverge((b, scid, dcid))
508 516 else:
509 517 if validdest(repo, dctx, sctx):
510 518 advsrc((b, scid, dcid))
511 519 else:
512 520 diverge((b, scid, dcid))
513 521 else:
514 522 # in this case, it is too expensive to examine in detail
515 523 differ((b, scid, dcid))
516 524
517 525 return results
518 526
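For the cases that need no changelog lookup, the categorization above reduces to set membership; a hedged, simplified sketch in which advancement and divergence (which need ancestry checks) are folded into 'differ', and 'invalid' is omitted since it only arises when explicit targets name unknown bookmarks:

    def compare(srcmarks, dstmarks):
        # simplified: advsrc/advdst/diverge need ancestry and are not modeled
        addsrc, adddst, same, differ = [], [], [], []
        for b in sorted(set(srcmarks) | set(dstmarks)):
            if b not in srcmarks:
                adddst.append((b, None, dstmarks[b]))
            elif b not in dstmarks:
                addsrc.append((b, srcmarks[b], None))
            elif srcmarks[b] == dstmarks[b]:
                same.append((b, srcmarks[b], dstmarks[b]))
            else:
                differ.append((b, srcmarks[b], dstmarks[b]))
        return addsrc, adddst, same, differ

    assert compare({b'a': b'x'}, {b'a': b'x', b'b': b'y'})[1] == \
        [(b'b', None, b'y')]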
519 527 def _diverge(ui, b, path, localmarks, remotenode):
520 528 '''Return appropriate diverged bookmark for specified ``path``
521 529
522 530 This returns None if it fails to assign any divergent
523 531 bookmark name.
524 532
525 533 This reuses an already existing name with an "@number" suffix, if it
526 534 refers to ``remotenode``.
527 535 '''
528 536 if b == '@':
529 537 b = ''
530 538 # try to use an @pathalias suffix
531 539 # if an @pathalias already exists, we overwrite (update) it
532 540 if path.startswith("file:"):
533 541 path = util.url(path).path
534 542 for p, u in ui.configitems("paths"):
535 543 if u.startswith("file:"):
536 544 u = util.url(u).path
537 545 if path == u:
538 546 return '%s@%s' % (b, p)
539 547
540 548 # assign a new unique "@number" suffix
541 549 for x in range(1, 100):
542 550 n = '%s@%d' % (b, x)
543 551 if n not in localmarks or localmarks[n] == remotenode:
544 552 return n
545 553
546 554 return None
547 555
548 556 def unhexlifybookmarks(marks):
549 557 binremotemarks = {}
550 558 for name, node in marks.items():
551 559 binremotemarks[name] = bin(node)
552 560 return binremotemarks
553 561
554 562 _binaryentry = struct.Struct('>20sH')
555 563
556 564 def binaryencode(bookmarks):
557 565 """encode a '(bookmark, node)' iterable into a binary stream
558 566
559 567 the binary format is:
560 568
561 569 <node><bookmark-length><bookmark-name>
562 570
563 571 :node: is a 20-byte binary node,
564 572 :bookmark-length: an unsigned short,
565 573 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
566 574
567 575 wdirid (all bits set) will be used as a special value for "missing"
568 576 """
569 577 binarydata = []
570 578 for book, node in bookmarks:
571 579 if not node: # None or ''
572 580 node = wdirid
573 581 binarydata.append(_binaryentry.pack(node, len(book)))
574 582 binarydata.append(book)
575 583 return ''.join(binarydata)
576 584
577 585 def binarydecode(stream):
578 586 """decode a binary stream into an '(bookmark, node)' iterable
579 587
580 588 the binary format is:
581 589
582 590 <node><bookmark-length><bookmark-name>
583 591
584 592 :node: is a 20-byte binary node,
585 593 :bookmark-length: an unsigned short,
586 594 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
587 595
588 596 wdirid (all bits set) will be used as a special value for "missing"
589 597 """
590 598 entrysize = _binaryentry.size
591 599 books = []
592 600 while True:
593 601 entry = stream.read(entrysize)
594 602 if len(entry) < entrysize:
595 603 if entry:
596 604 raise error.Abort(_('bad bookmark stream'))
597 605 break
598 606 node, length = _binaryentry.unpack(entry)
599 607 bookmark = stream.read(length)
600 608 if len(bookmark) < length:
601 609 if entry:
602 610 raise error.Abort(_('bad bookmark stream'))
603 611 if node == wdirid:
604 612 node = None
605 613 books.append((bookmark, node))
606 614 return books
607 615
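The wire format is fixed-size framing: a 20-byte node, a big-endian unsigned short for the name length, then the name itself, with wdirid (all bits set) marking a missing node. A runnable round-trip sketch of the same '>20sH' framing:

    import struct

    entry = struct.Struct('>20sH')  # <node><bookmark-length>, as above

    def encode(pairs):                     # mirrors binaryencode()
        out = []
        for book, node in pairs:
            out.append(entry.pack(node, len(book)))
            out.append(book)
        return b''.join(out)

    data = encode([(b'stable', b'\x01' * 20)])
    node, length = entry.unpack(data[:entry.size])
    assert (node, data[entry.size:entry.size + length]) == \
        (b'\x01' * 20, b'stable')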
608 616 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
609 617 ui.debug("checking for updated bookmarks\n")
610 618 localmarks = repo._bookmarks
611 619 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
612 620 ) = comparebookmarks(repo, remotemarks, localmarks)
613 621
614 622 status = ui.status
615 623 warn = ui.warn
616 624 if ui.configbool('ui', 'quietbookmarkmove'):
617 625 status = warn = ui.debug
618 626
619 627 explicit = set(explicit)
620 628 changed = []
621 629 for b, scid, dcid in addsrc:
622 630 if scid in repo: # add remote bookmarks for changes we already have
623 631 changed.append((b, scid, status,
624 632 _("adding remote bookmark %s\n") % (b)))
625 633 elif b in explicit:
626 634 explicit.remove(b)
627 635 ui.warn(_("remote bookmark %s points to locally missing %s\n")
628 636 % (b, hex(scid)[:12]))
629 637
630 638 for b, scid, dcid in advsrc:
631 639 changed.append((b, scid, status,
632 640 _("updating bookmark %s\n") % (b)))
633 641 # remove normal movement from explicit set
634 642 explicit.difference_update(d[0] for d in changed)
635 643
636 644 for b, scid, dcid in diverge:
637 645 if b in explicit:
638 646 explicit.discard(b)
639 647 changed.append((b, scid, status,
640 648 _("importing bookmark %s\n") % (b)))
641 649 else:
642 650 db = _diverge(ui, b, path, localmarks, scid)
643 651 if db:
644 652 changed.append((db, scid, warn,
645 653 _("divergent bookmark %s stored as %s\n") %
646 654 (b, db)))
647 655 else:
648 656 warn(_("warning: failed to assign numbered name "
649 657 "to divergent bookmark %s\n") % (b))
650 658 for b, scid, dcid in adddst + advdst:
651 659 if b in explicit:
652 660 explicit.discard(b)
653 661 changed.append((b, scid, status,
654 662 _("importing bookmark %s\n") % (b)))
655 663 for b, scid, dcid in differ:
656 664 if b in explicit:
657 665 explicit.remove(b)
658 666 ui.warn(_("remote bookmark %s points to locally missing %s\n")
659 667 % (b, hex(scid)[:12]))
660 668
661 669 if changed:
662 670 tr = trfunc()
663 671 changes = []
664 672 for b, node, writer, msg in sorted(changed):
665 673 changes.append((b, node))
666 674 writer(msg)
667 675 localmarks.applychanges(repo, tr, changes)
668 676
669 677 def incoming(ui, repo, peer):
670 678 '''Show bookmarks incoming from peer to repo
671 679 '''
672 680 ui.status(_("searching for changed bookmarks\n"))
673 681
674 682 with peer.commandexecutor() as e:
675 683 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
676 684 'namespace': 'bookmarks',
677 685 }).result())
678 686
679 687 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
680 688 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
681 689
682 690 incomings = []
683 691 if ui.debugflag:
684 692 getid = lambda id: id
685 693 else:
686 694 getid = lambda id: id[:12]
687 695 if ui.verbose:
688 696 def add(b, id, st):
689 697 incomings.append(" %-25s %s %s\n" % (b, getid(id), st))
690 698 else:
691 699 def add(b, id, st):
692 700 incomings.append(" %-25s %s\n" % (b, getid(id)))
693 701 for b, scid, dcid in addsrc:
694 702 # i18n: "added" refers to a bookmark
695 703 add(b, hex(scid), _('added'))
696 704 for b, scid, dcid in advsrc:
697 705 # i18n: "advanced" refers to a bookmark
698 706 add(b, hex(scid), _('advanced'))
699 707 for b, scid, dcid in diverge:
700 708 # i18n: "diverged" refers to a bookmark
701 709 add(b, hex(scid), _('diverged'))
702 710 for b, scid, dcid in differ:
703 711 # i18n: "changed" refers to a bookmark
704 712 add(b, hex(scid), _('changed'))
705 713
706 714 if not incomings:
707 715 ui.status(_("no changed bookmarks found\n"))
708 716 return 1
709 717
710 718 for s in sorted(incomings):
711 719 ui.write(s)
712 720
713 721 return 0
714 722
715 723 def outgoing(ui, repo, other):
716 724 '''Show bookmarks outgoing from repo to other
717 725 '''
718 726 ui.status(_("searching for changed bookmarks\n"))
719 727
720 728 remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
721 729 r = comparebookmarks(repo, repo._bookmarks, remotemarks)
722 730 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
723 731
724 732 outgoings = []
725 733 if ui.debugflag:
726 734 getid = lambda id: id
727 735 else:
728 736 getid = lambda id: id[:12]
729 737 if ui.verbose:
730 738 def add(b, id, st):
731 739 outgoings.append(" %-25s %s %s\n" % (b, getid(id), st))
732 740 else:
733 741 def add(b, id, st):
734 742 outgoings.append(" %-25s %s\n" % (b, getid(id)))
735 743 for b, scid, dcid in addsrc:
736 744 # i18n: "added refers to a bookmark
737 745 add(b, hex(scid), _('added'))
738 746 for b, scid, dcid in adddst:
739 747 # i18n: "deleted" refers to a bookmark
740 748 add(b, ' ' * 40, _('deleted'))
741 749 for b, scid, dcid in advsrc:
742 750 # i18n: "advanced" refers to a bookmark
743 751 add(b, hex(scid), _('advanced'))
744 752 for b, scid, dcid in diverge:
745 753 # i18n: "diverged" refers to a bookmark
746 754 add(b, hex(scid), _('diverged'))
747 755 for b, scid, dcid in differ:
748 756 # i18n: "changed" refers to a bookmark
749 757 add(b, hex(scid), _('changed'))
750 758
751 759 if not outgoings:
752 760 ui.status(_("no changed bookmarks found\n"))
753 761 return 1
754 762
755 763 for s in sorted(outgoings):
756 764 ui.write(s)
757 765
758 766 return 0
759 767
760 768 def summary(repo, peer):
761 769 '''Compare bookmarks between repo and peer for "hg summary" output
762 770
763 771 This returns a "(# of incoming, # of outgoing)" tuple.
764 772 '''
765 773 with peer.commandexecutor() as e:
766 774 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
767 775 'namespace': 'bookmarks',
768 776 }).result())
769 777
770 778 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
771 779 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
772 780 return (len(addsrc), len(adddst))
773 781
774 782 def validdest(repo, old, new):
775 783 """Is the new bookmark destination a valid update from the old one"""
776 784 repo = repo.unfiltered()
777 785 if old == new:
778 786 # Old == new -> nothing to update.
779 787 return False
780 788 elif not old:
781 789 # old is nullrev, anything is valid.
782 790 # (new != nullrev has been excluded by the previous check)
783 791 return True
784 792 elif repo.obsstore:
785 793 return new.node() in obsutil.foreground(repo, [old.node()])
786 794 else:
787 795 # still an independent clause as it is lazier (and therefore faster)
788 796 return old.descendant(new)
789 797
790 798 def checkformat(repo, mark):
791 799 """return a valid version of a potential bookmark name
792 800
793 801 Raises an abort error if the bookmark name is not valid.
794 802 """
795 803 mark = mark.strip()
796 804 if not mark:
797 805 raise error.Abort(_("bookmark names cannot consist entirely of "
798 806 "whitespace"))
799 807 scmutil.checknewlabel(repo, mark, 'bookmark')
800 808 return mark
801 809
802 810 def delete(repo, tr, names):
803 811 """remove a mark from the bookmark store
804 812
805 813 Raises an abort error if mark does not exist.
806 814 """
807 815 marks = repo._bookmarks
808 816 changes = []
809 817 for mark in names:
810 818 if mark not in marks:
811 819 raise error.Abort(_("bookmark '%s' does not exist") % mark)
812 820 if mark == repo._activebookmark:
813 821 deactivate(repo)
814 822 changes.append((mark, None))
815 823 marks.applychanges(repo, tr, changes)
816 824
817 825 def rename(repo, tr, old, new, force=False, inactive=False):
818 826 """rename a bookmark from old to new
819 827
820 828 If force is specified, then the new name can overwrite an existing
821 829 bookmark.
822 830
823 831 If inactive is specified, then do not activate the new bookmark.
824 832
825 833 Raises an abort error if old is not in the bookmark store.
826 834 """
827 835 marks = repo._bookmarks
828 836 mark = checkformat(repo, new)
829 837 if old not in marks:
830 838 raise error.Abort(_("bookmark '%s' does not exist") % old)
831 839 changes = []
832 840 for bm in marks.checkconflict(mark, force):
833 841 changes.append((bm, None))
834 842 changes.extend([(mark, marks[old]), (old, None)])
835 843 marks.applychanges(repo, tr, changes)
836 844 if repo._activebookmark == old and not inactive:
837 845 activate(repo, mark)
838 846
839 847 def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
840 848 """add a list of bookmarks
841 849
842 850 If force is specified, then the new name can overwrite an existing
843 851 bookmark.
844 852
845 853 If inactive is specified, then do not activate any bookmark. Otherwise, the
846 854 first bookmark is activated.
847 855
848 856 Raises an abort error if a bookmark name is invalid or conflicts.
849 857 """
850 858 marks = repo._bookmarks
851 859 cur = repo['.'].node()
852 860 newact = None
853 861 changes = []
854 862 hiddenrev = None
855 863
856 864 # unhide revs if any
857 865 if rev:
858 866 repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
859 867
860 868 for mark in names:
861 869 mark = checkformat(repo, mark)
862 870 if newact is None:
863 871 newact = mark
864 872 if inactive and mark == repo._activebookmark:
865 873 deactivate(repo)
866 874 return
867 875 tgt = cur
868 876 if rev:
869 877 ctx = scmutil.revsingle(repo, rev)
870 878 if ctx.hidden():
871 879 hiddenrev = ctx.hex()[:12]
872 880 tgt = ctx.node()
873 881 for bm in marks.checkconflict(mark, force, tgt):
874 882 changes.append((bm, None))
875 883 changes.append((mark, tgt))
876 884
877 885 if hiddenrev:
878 886 repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev)
879 887
880 888 if ctx.obsolete():
881 889 msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx)
882 890 repo.ui.warn("(%s)\n" % msg)
883 891
884 892 marks.applychanges(repo, tr, changes)
885 893 if not inactive and cur == marks[newact] and not rev:
886 894 activate(repo, newact)
887 895 elif cur != tgt and newact == repo._activebookmark:
888 896 deactivate(repo)
889 897
890 898 def _printbookmarks(ui, repo, bmarks, **opts):
891 899 """private method to print bookmarks
892 900
893 901 Provides a way for extensions to control how bookmarks are printed (e.g.
894 902 prepend or append to names)
895 903 """
896 904 opts = pycompat.byteskwargs(opts)
897 905 fm = ui.formatter('bookmarks', opts)
898 906 hexfn = fm.hexfunc
899 907 if len(bmarks) == 0 and fm.isplain():
900 908 ui.status(_("no bookmarks set\n"))
901 909 for bmark, (n, prefix, label) in sorted(bmarks.iteritems()):
902 910 fm.startitem()
903 911 if not ui.quiet:
904 912 fm.plain(' %s ' % prefix, label=label)
905 913 fm.write('bookmark', '%s', bmark, label=label)
906 914 pad = " " * (25 - encoding.colwidth(bmark))
907 915 fm.condwrite(not ui.quiet, 'rev node', pad + ' %d:%s',
908 916 repo.changelog.rev(n), hexfn(n), label=label)
909 917 fm.data(active=(activebookmarklabel in label))
910 918 fm.plain('\n')
911 919 fm.end()
912 920
913 921 def printbookmarks(ui, repo, **opts):
914 922 """print bookmarks to a formatter
915 923
916 924 Provides a way for extensions to control how bookmarks are printed.
917 925 """
918 926 marks = repo._bookmarks
919 927 bmarks = {}
920 928 for bmark, n in sorted(marks.iteritems()):
921 929 active = repo._activebookmark
922 930 if bmark == active:
923 931 prefix, label = '*', activebookmarklabel
924 932 else:
925 933 prefix, label = ' ', ''
926 934
927 935 bmarks[bmark] = (n, prefix, label)
928 936 _printbookmarks(ui, repo, bmarks, **opts)
929 937
930 938 def preparehookargs(name, old, new):
931 939 if new is None:
932 940 new = ''
933 941 if old is None:
934 942 old = ''
935 943 return {'bookmark': name,
936 944 'node': hex(new),
937 945 'oldnode': hex(old)}
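To recap the storage side of this file: .hg/bookmarks holds one '<40-hex-digit-node> <name>' line per bookmark, and .hg/bookmarks.current holds the active name, as read by bmstore.__init__ and _readactive above. A hedged sketch of the per-line parse (bin() in the real code is binascii.unhexlify):

    from binascii import unhexlify

    def parseline(line):
        # mirrors the bmstore.__init__ loop: '<hex-node> <name>\n'
        sha, refspec = line.strip().split(b' ', 1)
        return refspec, unhexlify(sha)

    line = b'0123456789abcdef0123456789abcdef01234567 stable\n'
    name, node = parseline(line)
    assert name == b'stable' and len(node) == 20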
@@ -1,2378 +1,2374
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 release = lockmod.release
74 74 urlerr = util.urlerr
75 75 urlreq = util.urlreq
76 76
77 77 # set of (path, vfs-location) tuples. vfs-location is:
78 78 # - 'plain' for vfs relative paths
79 79 # - '' for svfs relative paths
80 80 _cachedfiles = set()
81 81
82 82 class _basefilecache(scmutil.filecache):
83 83 """All filecache usage on repo are done for logic that should be unfiltered
84 84 """
85 85 def __get__(self, repo, type=None):
86 86 if repo is None:
87 87 return self
88 88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
89 89 def __set__(self, repo, value):
90 90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
91 91 def __delete__(self, repo):
92 92 return super(_basefilecache, self).__delete__(repo.unfiltered())
93 93
94 94 class repofilecache(_basefilecache):
95 95 """filecache for files in .hg but outside of .hg/store"""
96 96 def __init__(self, *paths):
97 97 super(repofilecache, self).__init__(*paths)
98 98 for path in paths:
99 99 _cachedfiles.add((path, 'plain'))
100 100
101 101 def join(self, obj, fname):
102 102 return obj.vfs.join(fname)
103 103
104 104 class storecache(_basefilecache):
105 105 """filecache for files in the store"""
106 106 def __init__(self, *paths):
107 107 super(storecache, self).__init__(*paths)
108 108 for path in paths:
109 109 _cachedfiles.add((path, ''))
110 110
111 111 def join(self, obj, fname):
112 112 return obj.sjoin(fname)
113 113
114 114 def isfilecached(repo, name):
115 115 """check if a repo has already cached "name" filecache-ed property
116 116
117 117 This returns (cachedobj-or-None, iscached) tuple.
118 118 """
119 119 cacheentry = repo.unfiltered()._filecache.get(name, None)
120 120 if not cacheentry:
121 121 return None, False
122 122 return cacheentry.obj, True
123 123
124 124 class unfilteredpropertycache(util.propertycache):
125 125 """propertycache that apply to unfiltered repo only"""
126 126
127 127 def __get__(self, repo, type=None):
128 128 unfi = repo.unfiltered()
129 129 if unfi is repo:
130 130 return super(unfilteredpropertycache, self).__get__(unfi)
131 131 return getattr(unfi, self.name)
132 132
133 133 class filteredpropertycache(util.propertycache):
134 134 """propertycache that must take filtering in account"""
135 135
136 136 def cachevalue(self, obj, value):
137 137 object.__setattr__(obj, self.name, value)
138 138
139 139
140 140 def hasunfilteredcache(repo, name):
141 141 """check if a repo has an unfilteredpropertycache value for <name>"""
142 142 return name in vars(repo.unfiltered())
143 143
144 144 def unfilteredmethod(orig):
145 145 """decorate method that always need to be run on unfiltered version"""
146 146 def wrapper(repo, *args, **kwargs):
147 147 return orig(repo.unfiltered(), *args, **kwargs)
148 148 return wrapper
149 149
150 150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
151 151 'unbundle'}
152 152 legacycaps = moderncaps.union({'changegroupsubset'})
153 153
154 154 @interfaceutil.implementer(repository.ipeercommandexecutor)
155 155 class localcommandexecutor(object):
156 156 def __init__(self, peer):
157 157 self._peer = peer
158 158 self._sent = False
159 159 self._closed = False
160 160
161 161 def __enter__(self):
162 162 return self
163 163
164 164 def __exit__(self, exctype, excvalue, exctb):
165 165 self.close()
166 166
167 167 def callcommand(self, command, args):
168 168 if self._sent:
169 169 raise error.ProgrammingError('callcommand() cannot be used after '
170 170 'sendcommands()')
171 171
172 172 if self._closed:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'close()')
175 175
176 176 # We don't need to support anything fancy. Just call the named
177 177 # method on the peer and return a resolved future.
178 178 fn = getattr(self._peer, pycompat.sysstr(command))
179 179
180 180 f = pycompat.futures.Future()
181 181
182 182 try:
183 183 result = fn(**pycompat.strkwargs(args))
184 184 except Exception:
185 185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
186 186 else:
187 187 f.set_result(result)
188 188
189 189 return f
190 190
191 191 def sendcommands(self):
192 192 self._sent = True
193 193
194 194 def close(self):
195 195 self._closed = True
196 196
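callcommand() above simply invokes the named peer method synchronously and wraps the outcome in an already-resolved future, so local and remote peers share one calling convention. A self-contained sketch of that resolve-eagerly pattern (FakePeer is illustrative only):

    from concurrent import futures

    def call(peer, command, args):
        # run the named method now; hand back an already-resolved Future
        f = futures.Future()
        try:
            f.set_result(getattr(peer, command)(**args))
        except Exception as exc:
            f.set_exception(exc)
        return f

    class FakePeer(object):
        def listkeys(self, namespace):
            return {b'book': b'0' * 40} if namespace == b'bookmarks' else {}

    fut = call(FakePeer(), 'listkeys', {'namespace': b'bookmarks'})
    assert fut.result() == {b'book': b'0' * 40}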
197 197 @interfaceutil.implementer(repository.ipeercommands)
198 198 class localpeer(repository.peer):
199 199 '''peer for a local repo; reflects only the most recent API'''
200 200
201 201 def __init__(self, repo, caps=None):
202 202 super(localpeer, self).__init__()
203 203
204 204 if caps is None:
205 205 caps = moderncaps.copy()
206 206 self._repo = repo.filtered('served')
207 207 self.ui = repo.ui
208 208 self._caps = repo._restrictcapabilities(caps)
209 209
210 210 # Begin of _basepeer interface.
211 211
212 212 def url(self):
213 213 return self._repo.url()
214 214
215 215 def local(self):
216 216 return self._repo
217 217
218 218 def peer(self):
219 219 return self
220 220
221 221 def canpush(self):
222 222 return True
223 223
224 224 def close(self):
225 225 self._repo.close()
226 226
227 227 # End of _basepeer interface.
228 228
229 229 # Begin of _basewirecommands interface.
230 230
231 231 def branchmap(self):
232 232 return self._repo.branchmap()
233 233
234 234 def capabilities(self):
235 235 return self._caps
236 236
237 237 def clonebundles(self):
238 238 return self._repo.tryread('clonebundles.manifest')
239 239
240 240 def debugwireargs(self, one, two, three=None, four=None, five=None):
241 241 """Used to test argument passing over the wire"""
242 242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
243 243 pycompat.bytestr(four),
244 244 pycompat.bytestr(five))
245 245
246 246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
247 247 **kwargs):
248 248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
249 249 common=common, bundlecaps=bundlecaps,
250 250 **kwargs)[1]
251 251 cb = util.chunkbuffer(chunks)
252 252
253 253 if exchange.bundle2requested(bundlecaps):
254 254 # When requesting a bundle2, getbundle returns a stream to make the
255 255 # wire level function happier. We need to build a proper object
256 256 # from it in local peer.
257 257 return bundle2.getunbundler(self.ui, cb)
258 258 else:
259 259 return changegroup.getunbundler('01', cb, None)
260 260
261 261 def heads(self):
262 262 return self._repo.heads()
263 263
264 264 def known(self, nodes):
265 265 return self._repo.known(nodes)
266 266
267 267 def listkeys(self, namespace):
268 268 return self._repo.listkeys(namespace)
269 269
270 270 def lookup(self, key):
271 271 return self._repo.lookup(key)
272 272
273 273 def pushkey(self, namespace, key, old, new):
274 274 return self._repo.pushkey(namespace, key, old, new)
275 275
276 276 def stream_out(self):
277 277 raise error.Abort(_('cannot perform stream clone against local '
278 278 'peer'))
279 279
280 280 def unbundle(self, bundle, heads, url):
281 281 """apply a bundle on a repo
282 282
283 283 This function handles the repo locking itself."""
284 284 try:
285 285 try:
286 286 bundle = exchange.readbundle(self.ui, bundle, None)
287 287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
288 288 if util.safehasattr(ret, 'getchunks'):
289 289 # This is a bundle20 object, turn it into an unbundler.
290 290 # This little dance should be dropped eventually when the
291 291 # API is finally improved.
292 292 stream = util.chunkbuffer(ret.getchunks())
293 293 ret = bundle2.getunbundler(self.ui, stream)
294 294 return ret
295 295 except Exception as exc:
296 296 # If the exception contains output salvaged from a bundle2
297 297 # reply, we need to make sure it is printed before continuing
298 298 # to fail. So we build a bundle2 with such output and consume
299 299 # it directly.
300 300 #
301 301 # This is not very elegant but allows a "simple" solution for
302 302 # issue4594
303 303 output = getattr(exc, '_bundle2salvagedoutput', ())
304 304 if output:
305 305 bundler = bundle2.bundle20(self._repo.ui)
306 306 for out in output:
307 307 bundler.addpart(out)
308 308 stream = util.chunkbuffer(bundler.getchunks())
309 309 b = bundle2.getunbundler(self.ui, stream)
310 310 bundle2.processbundle(self._repo, b)
311 311 raise
312 312 except error.PushRaced as exc:
313 313 raise error.ResponseError(_('push failed:'),
314 314 stringutil.forcebytestr(exc))
315 315
316 316 # End of _basewirecommands interface.
317 317
318 318 # Begin of peer interface.
319 319
320 320 def commandexecutor(self):
321 321 return localcommandexecutor(self)
322 322
323 323 # End of peer interface.
324 324
325 325 @interfaceutil.implementer(repository.ipeerlegacycommands)
326 326 class locallegacypeer(localpeer):
327 327 '''peer extension which implements legacy methods too; used for tests with
328 328 restricted capabilities'''
329 329
330 330 def __init__(self, repo):
331 331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
332 332
333 333 # Begin of baselegacywirecommands interface.
334 334
335 335 def between(self, pairs):
336 336 return self._repo.between(pairs)
337 337
338 338 def branches(self, nodes):
339 339 return self._repo.branches(nodes)
340 340
341 341 def changegroup(self, nodes, source):
342 342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
343 343 missingheads=self._repo.heads())
344 344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
345 345
346 346 def changegroupsubset(self, bases, heads, source):
347 347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
348 348 missingheads=heads)
349 349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
350 350
351 351 # End of baselegacywirecommands interface.
352 352
353 353 # Increment the sub-version when the revlog v2 format changes to lock out old
354 354 # clients.
355 355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
356 356
357 357 # Functions receiving (ui, features) that extensions can register to impact
358 358 # the ability to load repositories with custom requirements. Only
359 359 # functions defined in loaded extensions are called.
360 360 #
361 361 # The function receives a set of requirement strings that the repository
362 362 # is capable of opening. Functions will typically add elements to the
363 363 # set to reflect that the extension knows how to handle those requirements.
364 364 featuresetupfuncs = set()
365 365
366 366 @interfaceutil.implementer(repository.completelocalrepository)
367 367 class localrepository(object):
368 368
369 369 # obsolete experimental requirements:
370 370 # - manifestv2: An experimental new manifest format that allowed
371 371 # for stem compression of long paths. The experiment ended up not
372 372 # being successful (repository sizes went up due to worse delta
373 373 # chains), and the code was deleted in 4.6.
374 374 supportedformats = {
375 375 'revlogv1',
376 376 'generaldelta',
377 377 'treemanifest',
378 378 REVLOGV2_REQUIREMENT,
379 379 }
380 380 _basesupported = supportedformats | {
381 381 'store',
382 382 'fncache',
383 383 'shared',
384 384 'relshared',
385 385 'dotencode',
386 386 'exp-sparse',
387 387 }
388 388 openerreqs = {
389 389 'revlogv1',
390 390 'generaldelta',
391 391 'treemanifest',
392 392 }
393 393
394 394 # list of prefixes for files which can be written without 'wlock'
395 395 # Extensions should extend this list when needed
396 396 _wlockfreeprefix = {
397 397 # We might consider requiring 'wlock' for the next
398 398 # two, but pretty much all the existing code assumes
399 399 # wlock is not needed so we keep them excluded for
400 400 # now.
401 401 'hgrc',
402 402 'requires',
403 403 # XXX cache is a complicated business; someone
404 404 # should investigate this in depth at some point
405 405 'cache/',
406 406 # XXX shouldn't dirstate be covered by the wlock?
407 407 'dirstate',
408 408 # XXX bisect was still a bit too messy at the time
409 409 # this changeset was introduced. Someone should fix
410 410 # the remaining bit and drop this line
411 411 'bisect.state',
412 412 }
413 413
414 414 def __init__(self, baseui, path, create=False, intents=None):
415 415 self.requirements = set()
416 416 self.filtername = None
417 417 # wvfs: rooted at the repository root, used to access the working copy
418 418 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
419 419 # vfs: rooted at .hg, used to access repo files outside of .hg/store
420 420 self.vfs = None
421 421 # svfs: usually rooted at .hg/store, used to access repository history
422 422 # If this is a shared repository, this vfs may point to another
423 423 # repository's .hg/store directory.
424 424 self.svfs = None
425 425 self.root = self.wvfs.base
426 426 self.path = self.wvfs.join(".hg")
427 427 self.origroot = path
428 428 # This is only used by context.workingctx.match in order to
429 429 # detect files in subrepos.
430 430 self.auditor = pathutil.pathauditor(
431 431 self.root, callback=self._checknested)
432 432 # This is only used by context.basectx.match in order to detect
433 433 # files in subrepos.
434 434 self.nofsauditor = pathutil.pathauditor(
435 435 self.root, callback=self._checknested, realfs=False, cached=True)
436 436 self.baseui = baseui
437 437 self.ui = baseui.copy()
438 438 self.ui.copy = baseui.copy # prevent copying repo configuration
439 439 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
440 440 if (self.ui.configbool('devel', 'all-warnings') or
441 441 self.ui.configbool('devel', 'check-locks')):
442 442 self.vfs.audit = self._getvfsward(self.vfs.audit)
443 443 # A list of callbacks to shape the phase if no data were found.
444 444 # Callbacks are in the form: func(repo, roots) --> processed root.
445 445 # This list is to be filled by extensions during repo setup
446 446 self._phasedefaults = []
447 447 try:
448 448 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
449 449 self._loadextensions()
450 450 except IOError:
451 451 pass
452 452
453 453 if featuresetupfuncs:
454 454 self.supported = set(self._basesupported) # use private copy
455 455 extmods = set(m.__name__ for n, m
456 456 in extensions.extensions(self.ui))
457 457 for setupfunc in featuresetupfuncs:
458 458 if setupfunc.__module__ in extmods:
459 459 setupfunc(self.ui, self.supported)
460 460 else:
461 461 self.supported = self._basesupported
462 462 color.setup(self.ui)
463 463
464 464 # Add compression engines.
465 465 for name in util.compengines:
466 466 engine = util.compengines[name]
467 467 if engine.revlogheader():
468 468 self.supported.add('exp-compression-%s' % name)
469 469
470 470 if not self.vfs.isdir():
471 471 if create:
472 472 self.requirements = newreporequirements(self)
473 473
474 474 if not self.wvfs.exists():
475 475 self.wvfs.makedirs()
476 476 self.vfs.makedir(notindexed=True)
477 477
478 478 if 'store' in self.requirements:
479 479 self.vfs.mkdir("store")
480 480
481 481 # create an invalid changelog
482 482 self.vfs.append(
483 483 "00changelog.i",
484 484 '\0\0\0\2' # represents revlogv2
485 485 ' dummy changelog to prevent using the old repo layout'
486 486 )
487 487 else:
488 488 raise error.RepoError(_("repository %s not found") % path)
489 489 elif create:
490 490 raise error.RepoError(_("repository %s already exists") % path)
491 491 else:
492 492 try:
493 493 self.requirements = scmutil.readrequires(
494 494 self.vfs, self.supported)
495 495 except IOError as inst:
496 496 if inst.errno != errno.ENOENT:
497 497 raise
498 498
499 499 cachepath = self.vfs.join('cache')
500 500 self.sharedpath = self.path
501 501 try:
502 502 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
503 503 if 'relshared' in self.requirements:
504 504 sharedpath = self.vfs.join(sharedpath)
505 505 vfs = vfsmod.vfs(sharedpath, realpath=True)
506 506 cachepath = vfs.join('cache')
507 507 s = vfs.base
508 508 if not vfs.exists():
509 509 raise error.RepoError(
510 510 _('.hg/sharedpath points to nonexistent directory %s') % s)
511 511 self.sharedpath = s
512 512 except IOError as inst:
513 513 if inst.errno != errno.ENOENT:
514 514 raise
515 515
516 516 if 'exp-sparse' in self.requirements and not sparse.enabled:
517 517 raise error.RepoError(_('repository is using sparse feature but '
518 518 'sparse is not enabled; enable the '
519 519 '"sparse" extensions to access'))
520 520
521 521 self.store = store.store(
522 522 self.requirements, self.sharedpath,
523 523 lambda base: vfsmod.vfs(base, cacheaudited=True))
524 524 self.spath = self.store.path
525 525 self.svfs = self.store.vfs
526 526 self.sjoin = self.store.join
527 527 self.vfs.createmode = self.store.createmode
528 528 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
529 529 self.cachevfs.createmode = self.store.createmode
530 530 if (self.ui.configbool('devel', 'all-warnings') or
531 531 self.ui.configbool('devel', 'check-locks')):
532 532 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
533 533 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
534 534 else: # standard vfs
535 535 self.svfs.audit = self._getsvfsward(self.svfs.audit)
536 536 self._applyopenerreqs()
537 537 if create:
538 538 self._writerequirements()
539 539
540 540 self._dirstatevalidatewarned = False
541 541
542 542 self._branchcaches = {}
543 543 self._revbranchcache = None
544 544 self._filterpats = {}
545 545 self._datafilters = {}
546 546 self._transref = self._lockref = self._wlockref = None
547 547
548 548 # A cache for various files under .hg/ that tracks file changes,
549 549 # (used by the filecache decorator)
550 550 #
551 551 # Maps a property name to its util.filecacheentry
552 552 self._filecache = {}
553 553
554 554 # hold sets of revisions to be filtered
555 555 # should be cleared when something might have changed the filter value:
556 556 # - new changesets,
557 557 # - phase change,
558 558 # - new obsolescence marker,
559 559 # - working directory parent change,
560 560 # - bookmark changes
561 561 self.filteredrevcache = {}
562 562
563 563 # post-dirstate-status hooks
564 564 self._postdsstatus = []
565 565
566 566 # generic mapping between names and nodes
567 567 self.names = namespaces.namespaces()
568 568
569 569 # Key to signature value.
570 570 self._sparsesignaturecache = {}
571 571 # Signature to cached matcher instance.
572 572 self._sparsematchercache = {}
573 573
574 574 def _getvfsward(self, origfunc):
575 575 """build a ward for self.vfs"""
576 576 rref = weakref.ref(self)
577 577 def checkvfs(path, mode=None):
578 578 ret = origfunc(path, mode=mode)
579 579 repo = rref()
580 580 if (repo is None
581 581 or not util.safehasattr(repo, '_wlockref')
582 582 or not util.safehasattr(repo, '_lockref')):
583 583 return
584 584 if mode in (None, 'r', 'rb'):
585 585 return
586 586 if path.startswith(repo.path):
587 587 # truncate name relative to the repository (.hg)
588 588 path = path[len(repo.path) + 1:]
589 589 if path.startswith('cache/'):
590 590 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
591 591 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
592 592 if path.startswith('journal.'):
593 593 # journal is covered by 'lock'
594 594 if repo._currentlock(repo._lockref) is None:
595 595 repo.ui.develwarn('write with no lock: "%s"' % path,
596 596 stacklevel=2, config='check-locks')
597 597 elif repo._currentlock(repo._wlockref) is None:
598 598 # rest of vfs files are covered by 'wlock'
599 599 #
600 600 # exclude special files
601 601 for prefix in self._wlockfreeprefix:
602 602 if path.startswith(prefix):
603 603 return
604 604 repo.ui.develwarn('write with no wlock: "%s"' % path,
605 605 stacklevel=2, config='check-locks')
606 606 return ret
607 607 return checkvfs
608 608
609 609 def _getsvfsward(self, origfunc):
610 610 """build a ward for self.svfs"""
611 611 rref = weakref.ref(self)
612 612 def checksvfs(path, mode=None):
613 613 ret = origfunc(path, mode=mode)
614 614 repo = rref()
615 615 if repo is None or not util.safehasattr(repo, '_lockref'):
616 616 return
617 617 if mode in (None, 'r', 'rb'):
618 618 return
619 619 if path.startswith(repo.sharedpath):
620 620 # truncate name relative to the repository (.hg)
621 621 path = path[len(repo.sharedpath) + 1:]
622 622 if repo._currentlock(repo._lockref) is None:
623 623 repo.ui.develwarn('write with no lock: "%s"' % path,
624 624 stacklevel=3)
625 625 return ret
626 626 return checksvfs
627 627
628 628 def close(self):
629 629 self._writecaches()
630 630
631 631 def _loadextensions(self):
632 632 extensions.loadall(self.ui)
633 633
634 634 def _writecaches(self):
635 635 if self._revbranchcache:
636 636 self._revbranchcache.write()
637 637
638 638 def _restrictcapabilities(self, caps):
639 639 if self.ui.configbool('experimental', 'bundle2-advertise'):
640 640 caps = set(caps)
641 641 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
642 642 role='client'))
643 643 caps.add('bundle2=' + urlreq.quote(capsblob))
644 644 return caps
645 645
646 646 def _applyopenerreqs(self):
647 647 self.svfs.options = dict((r, 1) for r in self.requirements
648 648 if r in self.openerreqs)
649 649 # experimental config: format.chunkcachesize
650 650 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
651 651 if chunkcachesize is not None:
652 652 self.svfs.options['chunkcachesize'] = chunkcachesize
653 653 # experimental config: format.maxchainlen
654 654 maxchainlen = self.ui.configint('format', 'maxchainlen')
655 655 if maxchainlen is not None:
656 656 self.svfs.options['maxchainlen'] = maxchainlen
657 657 # experimental config: format.manifestcachesize
658 658 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
659 659 if manifestcachesize is not None:
660 660 self.svfs.options['manifestcachesize'] = manifestcachesize
661 661 # experimental config: format.aggressivemergedeltas
662 662 aggressivemergedeltas = self.ui.configbool('format',
663 663 'aggressivemergedeltas')
664 664 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
665 665 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
666 666 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
667 667 if 0 <= chainspan:
668 668 self.svfs.options['maxdeltachainspan'] = chainspan
669 669 mmapindexthreshold = self.ui.configbytes('experimental',
670 670 'mmapindexthreshold')
671 671 if mmapindexthreshold is not None:
672 672 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
673 673 withsparseread = self.ui.configbool('experimental', 'sparse-read')
674 674 srdensitythres = float(self.ui.config('experimental',
675 675 'sparse-read.density-threshold'))
676 676 srmingapsize = self.ui.configbytes('experimental',
677 677 'sparse-read.min-gap-size')
678 678 self.svfs.options['with-sparse-read'] = withsparseread
679 679 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
680 680 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
681 681
682 682 for r in self.requirements:
683 683 if r.startswith('exp-compression-'):
684 684 self.svfs.options['compengine'] = r[len('exp-compression-'):]
685 685
686 686 # TODO move "revlogv2" to openerreqs once finalized.
687 687 if REVLOGV2_REQUIREMENT in self.requirements:
688 688 self.svfs.options['revlogv2'] = True
689 689
690 690 def _writerequirements(self):
691 691 scmutil.writerequires(self.vfs, self.requirements)
692 692
693 693 def _checknested(self, path):
694 694 """Determine if path is a legal nested repository."""
695 695 if not path.startswith(self.root):
696 696 return False
697 697 subpath = path[len(self.root) + 1:]
698 698 normsubpath = util.pconvert(subpath)
699 699
700 700 # XXX: Checking against the current working copy is wrong in
701 701 # the sense that it can reject things like
702 702 #
703 703 # $ hg cat -r 10 sub/x.txt
704 704 #
705 705 # if sub/ is no longer a subrepository in the working copy
706 706 # parent revision.
707 707 #
708 708 # However, it can of course also allow things that would have
709 709 # been rejected before, such as the above cat command if sub/
710 710 # is a subrepository now, but was a normal directory before.
711 711 # The old path auditor would have rejected by mistake since it
712 712 # panics when it sees sub/.hg/.
713 713 #
714 714 # All in all, checking against the working copy seems sensible
715 715 # since we want to prevent access to nested repositories on
716 716 # the filesystem *now*.
717 717 ctx = self[None]
718 718 parts = util.splitpath(subpath)
719 719 while parts:
720 720 prefix = '/'.join(parts)
721 721 if prefix in ctx.substate:
722 722 if prefix == normsubpath:
723 723 return True
724 724 else:
725 725 sub = ctx.sub(prefix)
726 726 return sub.checknested(subpath[len(prefix) + 1:])
727 727 else:
728 728 parts.pop()
729 729 return False
730 730
731 731 def peer(self):
732 732 return localpeer(self) # not cached to avoid reference cycle
733 733
734 734 def unfiltered(self):
735 735 """Return unfiltered version of the repository
736 736
737 737 Intended to be overwritten by filtered repo."""
738 738 return self
739 739
740 740 def filtered(self, name, visibilityexceptions=None):
741 741 """Return a filtered version of a repository"""
742 742 cls = repoview.newtype(self.unfiltered().__class__)
743 743 return cls(self, name, visibilityexceptions)
744 744
745 745 @repofilecache('bookmarks', 'bookmarks.current')
746 746 def _bookmarks(self):
747 747 return bookmarks.bmstore(self)
748 748
749 749 @property
750 750 def _activebookmark(self):
751 751 return self._bookmarks.active
752 752
753 753 # _phasesets depend on changelog. what we need is to call
754 754 # _phasecache.invalidate() if '00changelog.i' was changed, but it
755 755 # can't be easily expressed in filecache mechanism.
756 756 @storecache('phaseroots', '00changelog.i')
757 757 def _phasecache(self):
758 758 return phases.phasecache(self, self._phasedefaults)
759 759
760 760 @storecache('obsstore')
761 761 def obsstore(self):
762 762 return obsolete.makestore(self.ui, self)
763 763
764 764 @storecache('00changelog.i')
765 765 def changelog(self):
766 766 return changelog.changelog(self.svfs,
767 767 trypending=txnutil.mayhavepending(self.root))
768 768
769 769 def _constructmanifest(self):
770 770 # This is a temporary function while we migrate from manifest to
771 771 # manifestlog. It allows bundlerepo and unionrepo to intercept the
772 772 # manifest creation.
773 773 return manifest.manifestrevlog(self.svfs)
774 774
775 775 @storecache('00manifest.i')
776 776 def manifestlog(self):
777 777 return manifest.manifestlog(self.svfs, self)
778 778
779 779 @repofilecache('dirstate')
780 780 def dirstate(self):
781 781 sparsematchfn = lambda: sparse.matcher(self)
782 782
783 783 return dirstate.dirstate(self.vfs, self.ui, self.root,
784 784 self._dirstatevalidate, sparsematchfn)
785 785
786 786 def _dirstatevalidate(self, node):
787 787 try:
788 788 self.changelog.rev(node)
789 789 return node
790 790 except error.LookupError:
791 791 if not self._dirstatevalidatewarned:
792 792 self._dirstatevalidatewarned = True
793 793 self.ui.warn(_("warning: ignoring unknown"
794 794 " working parent %s!\n") % short(node))
795 795 return nullid
796 796
797 797 @repofilecache(narrowspec.FILENAME)
798 798 def narrowpats(self):
799 799 """matcher patterns for this repository's narrowspec
800 800
801 801 A tuple of (includes, excludes).
802 802 """
803 803 source = self
804 804 if self.shared():
805 805 from . import hg
806 806 source = hg.sharedreposource(self)
807 807 return narrowspec.load(source)
808 808
809 809 @repofilecache(narrowspec.FILENAME)
810 810 def _narrowmatch(self):
811 811 if changegroup.NARROW_REQUIREMENT not in self.requirements:
812 812 return matchmod.always(self.root, '')
813 813 include, exclude = self.narrowpats
814 814 return narrowspec.match(self.root, include=include, exclude=exclude)
815 815
816 816 # TODO(martinvonz): make this property-like instead?
817 817 def narrowmatch(self):
818 818 return self._narrowmatch
819 819
820 820 def setnarrowpats(self, newincludes, newexcludes):
821 821 target = self
822 822 if self.shared():
823 823 from . import hg
824 824 target = hg.sharedreposource(self)
825 825 narrowspec.save(target, newincludes, newexcludes)
826 826 self.invalidate(clearfilecache=True)
827 827
828 828 def __getitem__(self, changeid):
829 829 if changeid is None:
830 830 return context.workingctx(self)
831 831 if isinstance(changeid, context.basectx):
832 832 return changeid
833 833 if isinstance(changeid, slice):
834 834 # wdirrev isn't contiguous so the slice shouldn't include it
835 835 return [context.changectx(self, i)
836 836 for i in xrange(*changeid.indices(len(self)))
837 837 if i not in self.changelog.filteredrevs]
838 838 try:
839 839 return context.changectx(self, changeid)
840 840 except error.WdirUnsupported:
841 841 return context.workingctx(self)
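# Illustrative sketch (not part of the class): how callers typically use
# item access; assumes ``repo`` is a localrepository obtained elsewhere.
#
#   wctx = repo[None]          # workingctx for the working directory
#   ctx = repo['tip']          # changectx for a rev number, hex node or name
#   for ctx in repo[0:3]:      # slicing skips filtered revisions
#       repo.ui.write('%d\n' % ctx.rev())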
842 842
843 843 def __contains__(self, changeid):
844 844 """True if the given changeid exists
845 845
846 846 error.LookupError is raised if an ambiguous node is specified.
847 847 """
848 848 try:
849 849 self[changeid]
850 850 return True
851 851 except error.RepoLookupError:
852 852 return False
853 853
854 854 def __nonzero__(self):
855 855 return True
856 856
857 857 __bool__ = __nonzero__
858 858
859 859 def __len__(self):
860 860 # no need to pay the cost of repoview.changelog
861 861 unfi = self.unfiltered()
862 862 return len(unfi.changelog)
863 863
864 864 def __iter__(self):
865 865 return iter(self.changelog)
866 866
867 867 def revs(self, expr, *args):
868 868 '''Find revisions matching a revset.
869 869
870 870 The revset is specified as a string ``expr`` that may contain
871 871 %-formatting to escape certain types. See ``revsetlang.formatspec``.
872 872
873 873 Revset aliases from the configuration are not expanded. To expand
874 874 user aliases, consider calling ``scmutil.revrange()`` or
875 875 ``repo.anyrevs([expr], user=True)``.
876 876
877 877 Returns a revset.abstractsmartset, which is a list-like interface
878 878 that contains integer revisions.
879 879 '''
880 880 expr = revsetlang.formatspec(expr, *args)
881 881 m = revset.match(None, expr)
882 882 return m(self)
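# Illustrative sketch of ``revs()`` %-formatting (assumes ``repo`` is a
# localrepository instance obtained elsewhere):
#
#   for rev in repo.revs('ancestors(%d) and user(%s)', 42, 'alice'):
#       repo.ui.write('%d\n' % rev)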
883 883
884 884 def set(self, expr, *args):
885 885 '''Find revisions matching a revset and emit changectx instances.
886 886
887 887 This is a convenience wrapper around ``revs()`` that iterates the
888 888 result and is a generator of changectx instances.
889 889
890 890 Revset aliases from the configuration are not expanded. To expand
891 891 user aliases, consider calling ``scmutil.revrange()``.
892 892 '''
893 893 for r in self.revs(expr, *args):
894 894 yield self[r]
895 895
896 896 def anyrevs(self, specs, user=False, localalias=None):
897 897 '''Find revisions matching one of the given revsets.
898 898
899 899 Revset aliases from the configuration are not expanded by default. To
900 900 expand user aliases, specify ``user=True``. To provide some local
901 901 definitions overriding user aliases, set ``localalias`` to
902 902 ``{name: definitionstring}``.
903 903 '''
904 904 if user:
905 905 m = revset.matchany(self.ui, specs,
906 906 lookup=revset.lookupfn(self),
907 907 localalias=localalias)
908 908 else:
909 909 m = revset.matchany(None, specs, localalias=localalias)
910 910 return m(self)
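# Illustrative sketch of ``anyrevs()`` with a local alias override; the
# alias name and definition here are hypothetical.
#
#   revs = repo.anyrevs(['myalias'], user=True,
#                       localalias={'myalias': 'heads(default)'})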
911 911
912 912 def url(self):
913 913 return 'file:' + self.root
914 914
915 915 def hook(self, name, throw=False, **args):
916 916 """Call a hook, passing this repo instance.
917 917
918 918 This is a convenience method to aid invoking hooks. Extensions likely
919 919 won't call this unless they have registered a custom hook or are
920 920 replacing code that is expected to call a hook.
921 921 """
922 922 return hook.hook(self.ui, self, name, throw, **args)
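# Illustrative sketch (hypothetical hook name): an extension firing its
# own hook through this helper; keyword arguments become HG_* variables
# for shell hooks.
#
#   repo.hook('myext-sync', throw=False, node=hex(newnode))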
923 923
924 924 @filteredpropertycache
925 925 def _tagscache(self):
926 926 '''Returns a tagscache object that contains various tags related
927 927 caches.'''
928 928
929 929 # This simplifies its cache management by having one decorated
930 930 # function (this one) and the rest simply fetch things from it.
931 931 class tagscache(object):
932 932 def __init__(self):
933 933 # These two define the set of tags for this repository. tags
934 934 # maps tag name to node; tagtypes maps tag name to 'global' or
935 935 # 'local'. (Global tags are defined by .hgtags across all
936 936 # heads, and local tags are defined in .hg/localtags.)
937 937 # They constitute the in-memory cache of tags.
938 938 self.tags = self.tagtypes = None
939 939
940 940 self.nodetagscache = self.tagslist = None
941 941
942 942 cache = tagscache()
943 943 cache.tags, cache.tagtypes = self._findtags()
944 944
945 945 return cache
946 946
947 947 def tags(self):
948 948 '''return a mapping of tag to node'''
949 949 t = {}
950 950 if self.changelog.filteredrevs:
951 951 tags, tt = self._findtags()
952 952 else:
953 953 tags = self._tagscache.tags
954 954 for k, v in tags.iteritems():
955 955 try:
956 956 # ignore tags to unknown nodes
957 957 self.changelog.rev(v)
958 958 t[k] = v
959 959 except (error.LookupError, ValueError):
960 960 pass
961 961 return t
962 962
963 963 def _findtags(self):
964 964 '''Do the hard work of finding tags. Return a pair of dicts
965 965 (tags, tagtypes) where tags maps tag name to node, and tagtypes
966 966 maps tag name to a string like \'global\' or \'local\'.
967 967 Subclasses or extensions are free to add their own tags, but
968 968 should be aware that the returned dicts will be retained for the
969 969 duration of the localrepo object.'''
970 970
971 971 # XXX what tagtype should subclasses/extensions use? Currently
972 972 # mq and bookmarks add tags, but do not set the tagtype at all.
973 973 # Should each extension invent its own tag type? Should there
974 974 # be one tagtype for all such "virtual" tags? Or is the status
975 975 # quo fine?
976 976
977 977
978 978 # map tag name to (node, hist)
979 979 alltags = tagsmod.findglobaltags(self.ui, self)
980 980 # map tag name to tag type
981 981 tagtypes = dict((tag, 'global') for tag in alltags)
982 982
983 983 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
984 984
985 985 # Build the return dicts. Have to re-encode tag names because
986 986 # the tags module always uses UTF-8 (in order not to lose info
987 987 # writing to the cache), but the rest of Mercurial wants them in
988 988 # local encoding.
989 989 tags = {}
990 990 for (name, (node, hist)) in alltags.iteritems():
991 991 if node != nullid:
992 992 tags[encoding.tolocal(name)] = node
993 993 tags['tip'] = self.changelog.tip()
994 994 tagtypes = dict([(encoding.tolocal(name), value)
995 995 for (name, value) in tagtypes.iteritems()])
996 996 return (tags, tagtypes)
997 997
998 998 def tagtype(self, tagname):
999 999 '''
1000 1000 return the type of the given tag. result can be:
1001 1001
1002 1002 'local' : a local tag
1003 1003 'global' : a global tag
1004 1004 None : tag does not exist
1005 1005 '''
1006 1006
1007 1007 return self._tagscache.tagtypes.get(tagname)
1008 1008
1009 1009 def tagslist(self):
1010 1010 '''return a list of tags ordered by revision'''
1011 1011 if not self._tagscache.tagslist:
1012 1012 l = []
1013 1013 for t, n in self.tags().iteritems():
1014 1014 l.append((self.changelog.rev(n), t, n))
1015 1015 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1016 1016
1017 1017 return self._tagscache.tagslist
1018 1018
1019 1019 def nodetags(self, node):
1020 1020 '''return the tags associated with a node'''
1021 1021 if not self._tagscache.nodetagscache:
1022 1022 nodetagscache = {}
1023 1023 for t, n in self._tagscache.tags.iteritems():
1024 1024 nodetagscache.setdefault(n, []).append(t)
1025 1025 for tags in nodetagscache.itervalues():
1026 1026 tags.sort()
1027 1027 self._tagscache.nodetagscache = nodetagscache
1028 1028 return self._tagscache.nodetagscache.get(node, [])
1029 1029
1030 1030 def nodebookmarks(self, node):
1031 1031 """return the list of bookmarks pointing to the specified node"""
1032 marks = []
1033 for bookmark, n in self._bookmarks.iteritems():
1034 if n == node:
1035 marks.append(bookmark)
1036 return sorted(marks)
1032 return self._bookmarks.names(node)
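# Illustrative sketch of the lookup now delegated to bmstore.names(); the
# result is assumed to be the same sorted list the inlined loop produced.
#
#   marks = repo.nodebookmarks(ctx.node())   # e.g. ['@', 'feature-x']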
1037 1033
1038 1034 def branchmap(self):
1039 1035 '''returns a dictionary {branch: [branchheads]} with branchheads
1040 1036 ordered by increasing revision number'''
1041 1037 branchmap.updatecache(self)
1042 1038 return self._branchcaches[self.filtername]
1043 1039
1044 1040 @unfilteredmethod
1045 1041 def revbranchcache(self):
1046 1042 if not self._revbranchcache:
1047 1043 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1048 1044 return self._revbranchcache
1049 1045
1050 1046 def branchtip(self, branch, ignoremissing=False):
1051 1047 '''return the tip node for a given branch
1052 1048
1053 1049 If ignoremissing is True, then this method will not raise an error.
1054 1050 This is helpful for callers that only expect None for a missing branch
1055 1051 (e.g. namespace).
1056 1052
1057 1053 '''
1058 1054 try:
1059 1055 return self.branchmap().branchtip(branch)
1060 1056 except KeyError:
1061 1057 if not ignoremissing:
1062 1058 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1063 1059 else:
1064 1060 pass
1065 1061
1066 1062 def lookup(self, key):
1067 1063 return scmutil.revsymbol(self, key).node()
1068 1064
1069 1065 def lookupbranch(self, key):
1070 1066 if key in self.branchmap():
1071 1067 return key
1072 1068
1073 1069 return scmutil.revsymbol(self, key).branch()
1074 1070
1075 1071 def known(self, nodes):
1076 1072 cl = self.changelog
1077 1073 nm = cl.nodemap
1078 1074 filtered = cl.filteredrevs
1079 1075 result = []
1080 1076 for n in nodes:
1081 1077 r = nm.get(n)
1082 1078 resp = not (r is None or r in filtered)
1083 1079 result.append(resp)
1084 1080 return result
1085 1081
1086 1082 def local(self):
1087 1083 return self
1088 1084
1089 1085 def publishing(self):
1090 1086 # it's safe (and desirable) to trust the publish flag unconditionally
1091 1087 # so that we don't finalize changes shared between users via ssh or nfs
1092 1088 return self.ui.configbool('phases', 'publish', untrusted=True)
1093 1089
1094 1090 def cancopy(self):
1095 1091 # so statichttprepo's override of local() works
1096 1092 if not self.local():
1097 1093 return False
1098 1094 if not self.publishing():
1099 1095 return True
1100 1096 # if publishing we can't copy if there is filtered content
1101 1097 return not self.filtered('visible').changelog.filteredrevs
1102 1098
1103 1099 def shared(self):
1104 1100 '''the type of shared repository (None if not shared)'''
1105 1101 if self.sharedpath != self.path:
1106 1102 return 'store'
1107 1103 return None
1108 1104
1109 1105 def wjoin(self, f, *insidef):
1110 1106 return self.vfs.reljoin(self.root, f, *insidef)
1111 1107
1112 1108 def file(self, f):
1113 1109 if f[0] == '/':
1114 1110 f = f[1:]
1115 1111 return filelog.filelog(self.svfs, f)
1116 1112
1117 1113 def setparents(self, p1, p2=nullid):
1118 1114 with self.dirstate.parentchange():
1119 1115 copies = self.dirstate.setparents(p1, p2)
1120 1116 pctx = self[p1]
1121 1117 if copies:
1122 1118 # Adjust copy records, the dirstate cannot do it, it
1123 1119 # requires access to parents manifests. Preserve them
1124 1120 # only for entries added to first parent.
1125 1121 for f in copies:
1126 1122 if f not in pctx and copies[f] in pctx:
1127 1123 self.dirstate.copy(copies[f], f)
1128 1124 if p2 == nullid:
1129 1125 for f, s in sorted(self.dirstate.copies().items()):
1130 1126 if f not in pctx and s not in pctx:
1131 1127 self.dirstate.copy(None, f)
1132 1128
1133 1129 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1134 1130 """changeid can be a changeset revision, node, or tag.
1135 1131 fileid can be a file revision or node."""
1136 1132 return context.filectx(self, path, changeid, fileid,
1137 1133 changectx=changectx)
1138 1134
1139 1135 def getcwd(self):
1140 1136 return self.dirstate.getcwd()
1141 1137
1142 1138 def pathto(self, f, cwd=None):
1143 1139 return self.dirstate.pathto(f, cwd)
1144 1140
1145 1141 def _loadfilter(self, filter):
1146 1142 if filter not in self._filterpats:
1147 1143 l = []
1148 1144 for pat, cmd in self.ui.configitems(filter):
1149 1145 if cmd == '!':
1150 1146 continue
1151 1147 mf = matchmod.match(self.root, '', [pat])
1152 1148 fn = None
1153 1149 params = cmd
1154 1150 for name, filterfn in self._datafilters.iteritems():
1155 1151 if cmd.startswith(name):
1156 1152 fn = filterfn
1157 1153 params = cmd[len(name):].lstrip()
1158 1154 break
1159 1155 if not fn:
1160 1156 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1161 1157 # Wrap old filters not supporting keyword arguments
1162 1158 if not pycompat.getargspec(fn)[2]:
1163 1159 oldfn = fn
1164 1160 fn = lambda s, c, **kwargs: oldfn(s, c)
1165 1161 l.append((mf, fn, params))
1166 1162 self._filterpats[filter] = l
1167 1163 return self._filterpats[filter]
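# The patterns above come from an hgrc section named after ``filter``
# ('encode' or 'decode'); a minimal configuration sketch:
#
#   [encode]
#   *.gz = pipe: gunzip
#   [decode]
#   *.gz = pipe: gzip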
1168 1164
1169 1165 def _filter(self, filterpats, filename, data):
1170 1166 for mf, fn, cmd in filterpats:
1171 1167 if mf(filename):
1172 1168 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1173 1169 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1174 1170 break
1175 1171
1176 1172 return data
1177 1173
1178 1174 @unfilteredpropertycache
1179 1175 def _encodefilterpats(self):
1180 1176 return self._loadfilter('encode')
1181 1177
1182 1178 @unfilteredpropertycache
1183 1179 def _decodefilterpats(self):
1184 1180 return self._loadfilter('decode')
1185 1181
1186 1182 def adddatafilter(self, name, filter):
1187 1183 self._datafilters[name] = filter
1188 1184
1189 1185 def wread(self, filename):
1190 1186 if self.wvfs.islink(filename):
1191 1187 data = self.wvfs.readlink(filename)
1192 1188 else:
1193 1189 data = self.wvfs.read(filename)
1194 1190 return self._filter(self._encodefilterpats, filename, data)
1195 1191
1196 1192 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1197 1193 """write ``data`` into ``filename`` in the working directory
1198 1194
1199 1195 This returns the length of the written (maybe decoded) data.
1200 1196 """
1201 1197 data = self._filter(self._decodefilterpats, filename, data)
1202 1198 if 'l' in flags:
1203 1199 self.wvfs.symlink(data, filename)
1204 1200 else:
1205 1201 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1206 1202 **kwargs)
1207 1203 if 'x' in flags:
1208 1204 self.wvfs.setflags(filename, False, True)
1209 1205 else:
1210 1206 self.wvfs.setflags(filename, False, False)
1211 1207 return len(data)
1212 1208
1213 1209 def wwritedata(self, filename, data):
1214 1210 return self._filter(self._decodefilterpats, filename, data)
1215 1211
1216 1212 def currenttransaction(self):
1217 1213 """return the current transaction or None if non exists"""
1218 1214 if self._transref:
1219 1215 tr = self._transref()
1220 1216 else:
1221 1217 tr = None
1222 1218
1223 1219 if tr and tr.running():
1224 1220 return tr
1225 1221 return None
1226 1222
1227 1223 def transaction(self, desc, report=None):
1228 1224 if (self.ui.configbool('devel', 'all-warnings')
1229 1225 or self.ui.configbool('devel', 'check-locks')):
1230 1226 if self._currentlock(self._lockref) is None:
1231 1227 raise error.ProgrammingError('transaction requires locking')
1232 1228 tr = self.currenttransaction()
1233 1229 if tr is not None:
1234 1230 return tr.nest(name=desc)
1235 1231
1236 1232 # abort here if the journal already exists
1237 1233 if self.svfs.exists("journal"):
1238 1234 raise error.RepoError(
1239 1235 _("abandoned transaction found"),
1240 1236 hint=_("run 'hg recover' to clean up transaction"))
1241 1237
1242 1238 idbase = "%.40f#%f" % (random.random(), time.time())
1243 1239 ha = hex(hashlib.sha1(idbase).digest())
1244 1240 txnid = 'TXN:' + ha
1245 1241 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1246 1242
1247 1243 self._writejournal(desc)
1248 1244 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1249 1245 if report:
1250 1246 rp = report
1251 1247 else:
1252 1248 rp = self.ui.warn
1253 1249 vfsmap = {'plain': self.vfs} # root of .hg/
1254 1250 # we must avoid cyclic reference between repo and transaction.
1255 1251 reporef = weakref.ref(self)
1256 1252 # Code to track tag movement
1257 1253 #
1258 1254 # Since tags are all handled as file content, it is actually quite hard
1259 1255 # to track these movements from a code perspective. So we fall back to
1260 1256 # tracking at the repository level. One could envision tracking changes
1261 1257 # to the '.hgtags' file through changegroup apply, but that fails to
1262 1258 # cope with cases where a transaction exposes new heads without a
1263 1259 # changegroup being involved (eg: phase movement).
1264 1260 #
1265 1261 # For now, we gate the feature behind a flag since this likely comes
1266 1262 # with performance impacts. The current code runs more often than needed
1267 1263 # and does not use caches as much as it could. The current focus is on
1268 1264 # the behavior of the feature so we disable it by default. The flag
1269 1265 # will be removed when we are happy with the performance impact.
1270 1266 #
1271 1267 # Once this feature is no longer experimental move the following
1272 1268 # documentation to the appropriate help section:
1273 1269 #
1274 1270 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1275 1271 # tags (new or changed or deleted tags). In addition the details of
1276 1272 # these changes are made available in a file at:
1277 1273 # ``REPOROOT/.hg/changes/tags.changes``.
1278 1274 # Make sure you check for HG_TAG_MOVED before reading that file as it
1279 1275 # might exist from a previous transaction even if no tags were touched
1280 1276 # in this one. Changes are recorded in a line-based format::
1281 1277 #
1282 1278 # <action> <hex-node> <tag-name>\n
1283 1279 #
1284 1280 # Actions are defined as follows:
1285 1281 # "-R": tag is removed,
1286 1282 # "+A": tag is added,
1287 1283 # "-M": tag is moved (old value),
1288 1284 # "+M": tag is moved (new value),
1289 1285 tracktags = lambda x: None
1290 1286 # experimental config: experimental.hook-track-tags
1291 1287 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1292 1288 if desc != 'strip' and shouldtracktags:
1293 1289 oldheads = self.changelog.headrevs()
1294 1290 def tracktags(tr2):
1295 1291 repo = reporef()
1296 1292 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1297 1293 newheads = repo.changelog.headrevs()
1298 1294 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1299 1295 # note: we compare lists here.
1300 1296 # As we do it only once, building a set would not be cheaper
1301 1297 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1302 1298 if changes:
1303 1299 tr2.hookargs['tag_moved'] = '1'
1304 1300 with repo.vfs('changes/tags.changes', 'w',
1305 1301 atomictemp=True) as changesfile:
1306 1302 # note: we do not register the file with the transaction
1307 1303 # because we need it to still exist when the transaction
1308 1304 # is closed (for txnclose hooks)
1309 1305 tagsmod.writediff(changesfile, changes)
1310 1306 def validate(tr2):
1311 1307 """will run pre-closing hooks"""
1312 1308 # XXX the transaction API is a bit lacking here so we take a hacky
1313 1309 # path for now
1314 1310 #
1315 1311 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1316 1312 # dict is copied before these run. In addition we need the data
1317 1313 # available to in-memory hooks too.
1318 1314 #
1319 1315 # Moreover, we also need to make sure this runs before txnclose
1320 1316 # hooks and there is no "pending" mechanism that would execute
1321 1317 # logic only if hooks are about to run.
1322 1318 #
1323 1319 # Fixing this limitation of the transaction is also needed to track
1324 1320 # other families of changes (bookmarks, phases, obsolescence).
1325 1321 #
1326 1322 # This will have to be fixed before we remove the experimental
1327 1323 # gating.
1328 1324 tracktags(tr2)
1329 1325 repo = reporef()
1330 1326 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1331 1327 scmutil.enforcesinglehead(repo, tr2, desc)
1332 1328 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1333 1329 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1334 1330 args = tr.hookargs.copy()
1335 1331 args.update(bookmarks.preparehookargs(name, old, new))
1336 1332 repo.hook('pretxnclose-bookmark', throw=True,
1337 1333 txnname=desc,
1338 1334 **pycompat.strkwargs(args))
1339 1335 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1340 1336 cl = repo.unfiltered().changelog
1341 1337 for rev, (old, new) in tr.changes['phases'].items():
1342 1338 args = tr.hookargs.copy()
1343 1339 node = hex(cl.node(rev))
1344 1340 args.update(phases.preparehookargs(node, old, new))
1345 1341 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1346 1342 **pycompat.strkwargs(args))
1347 1343
1348 1344 repo.hook('pretxnclose', throw=True,
1349 1345 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1350 1346 def releasefn(tr, success):
1351 1347 repo = reporef()
1352 1348 if success:
1353 1349 # this should be explicitly invoked here, because
1354 1350 # in-memory changes aren't written out when closing the
1355 1351 # transaction if tr.addfilegenerator (via
1356 1352 # dirstate.write or so) wasn't invoked while the
1357 1353 # transaction was running
1358 1354 repo.dirstate.write(None)
1359 1355 else:
1360 1356 # discard all changes (including ones already written
1361 1357 # out) in this transaction
1362 1358 repo.dirstate.restorebackup(None, 'journal.dirstate')
1363 1359
1364 1360 repo.invalidate(clearfilecache=True)
1365 1361
1366 1362 tr = transaction.transaction(rp, self.svfs, vfsmap,
1367 1363 "journal",
1368 1364 "undo",
1369 1365 aftertrans(renames),
1370 1366 self.store.createmode,
1371 1367 validator=validate,
1372 1368 releasefn=releasefn,
1373 1369 checkambigfiles=_cachedfiles,
1374 1370 name=desc)
1375 1371 tr.changes['revs'] = xrange(0, 0)
1376 1372 tr.changes['obsmarkers'] = set()
1377 1373 tr.changes['phases'] = {}
1378 1374 tr.changes['bookmarks'] = {}
1379 1375
1380 1376 tr.hookargs['txnid'] = txnid
1381 1377 # note: writing the fncache only during finalize means that the file is
1382 1378 # outdated when running hooks. As fncache is used for streaming clone,
1383 1379 # this is not expected to break anything that happens during the hooks.
1384 1380 tr.addfinalize('flush-fncache', self.store.write)
1385 1381 def txnclosehook(tr2):
1386 1382 """To be run if transaction is successful, will schedule a hook run
1387 1383 """
1388 1384 # Don't reference tr2 in hook() so we don't hold a reference.
1389 1385 # This reduces memory consumption when there are multiple
1390 1386 # transactions per lock. This can likely go away if issue5045
1391 1387 # fixes the function accumulation.
1392 1388 hookargs = tr2.hookargs
1393 1389
1394 1390 def hookfunc():
1395 1391 repo = reporef()
1396 1392 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1397 1393 bmchanges = sorted(tr.changes['bookmarks'].items())
1398 1394 for name, (old, new) in bmchanges:
1399 1395 args = tr.hookargs.copy()
1400 1396 args.update(bookmarks.preparehookargs(name, old, new))
1401 1397 repo.hook('txnclose-bookmark', throw=False,
1402 1398 txnname=desc, **pycompat.strkwargs(args))
1403 1399
1404 1400 if hook.hashook(repo.ui, 'txnclose-phase'):
1405 1401 cl = repo.unfiltered().changelog
1406 1402 phasemv = sorted(tr.changes['phases'].items())
1407 1403 for rev, (old, new) in phasemv:
1408 1404 args = tr.hookargs.copy()
1409 1405 node = hex(cl.node(rev))
1410 1406 args.update(phases.preparehookargs(node, old, new))
1411 1407 repo.hook('txnclose-phase', throw=False, txnname=desc,
1412 1408 **pycompat.strkwargs(args))
1413 1409
1414 1410 repo.hook('txnclose', throw=False, txnname=desc,
1415 1411 **pycompat.strkwargs(hookargs))
1416 1412 reporef()._afterlock(hookfunc)
1417 1413 tr.addfinalize('txnclose-hook', txnclosehook)
1418 1414 # Include a leading "-" to make it happen before the transaction summary
1419 1415 # reports registered via scmutil.registersummarycallback() whose names
1420 1416 # are 00-txnreport etc. That way, the caches will be warm when the
1421 1417 # callbacks run.
1422 1418 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1423 1419 def txnaborthook(tr2):
1424 1420 """To be run if transaction is aborted
1425 1421 """
1426 1422 reporef().hook('txnabort', throw=False, txnname=desc,
1427 1423 **pycompat.strkwargs(tr2.hookargs))
1428 1424 tr.addabort('txnabort-hook', txnaborthook)
1429 1425 # avoid eager cache invalidation. in-memory data should be identical
1430 1426 # to stored data if transaction has no error.
1431 1427 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1432 1428 self._transref = weakref.ref(tr)
1433 1429 scmutil.registersummarycallback(self, tr, desc)
1434 1430 return tr
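# Illustrative calling sequence (a minimal sketch; the lock and the
# transaction objects are assumed to support the context-manager
# protocol in this version):
#
#   with repo.wlock(), repo.lock():
#       with repo.transaction('my-operation') as tr:
#           ...  # writes are journaled; abort on exception, close on success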
1435 1431
1436 1432 def _journalfiles(self):
1437 1433 return ((self.svfs, 'journal'),
1438 1434 (self.vfs, 'journal.dirstate'),
1439 1435 (self.vfs, 'journal.branch'),
1440 1436 (self.vfs, 'journal.desc'),
1441 1437 (self.vfs, 'journal.bookmarks'),
1442 1438 (self.svfs, 'journal.phaseroots'))
1443 1439
1444 1440 def undofiles(self):
1445 1441 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1446 1442
1447 1443 @unfilteredmethod
1448 1444 def _writejournal(self, desc):
1449 1445 self.dirstate.savebackup(None, 'journal.dirstate')
1450 1446 self.vfs.write("journal.branch",
1451 1447 encoding.fromlocal(self.dirstate.branch()))
1452 1448 self.vfs.write("journal.desc",
1453 1449 "%d\n%s\n" % (len(self), desc))
1454 1450 self.vfs.write("journal.bookmarks",
1455 1451 self.vfs.tryread("bookmarks"))
1456 1452 self.svfs.write("journal.phaseroots",
1457 1453 self.svfs.tryread("phaseroots"))
1458 1454
1459 1455 def recover(self):
1460 1456 with self.lock():
1461 1457 if self.svfs.exists("journal"):
1462 1458 self.ui.status(_("rolling back interrupted transaction\n"))
1463 1459 vfsmap = {'': self.svfs,
1464 1460 'plain': self.vfs,}
1465 1461 transaction.rollback(self.svfs, vfsmap, "journal",
1466 1462 self.ui.warn,
1467 1463 checkambigfiles=_cachedfiles)
1468 1464 self.invalidate()
1469 1465 return True
1470 1466 else:
1471 1467 self.ui.warn(_("no interrupted transaction available\n"))
1472 1468 return False
1473 1469
1474 1470 def rollback(self, dryrun=False, force=False):
1475 1471 wlock = lock = dsguard = None
1476 1472 try:
1477 1473 wlock = self.wlock()
1478 1474 lock = self.lock()
1479 1475 if self.svfs.exists("undo"):
1480 1476 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1481 1477
1482 1478 return self._rollback(dryrun, force, dsguard)
1483 1479 else:
1484 1480 self.ui.warn(_("no rollback information available\n"))
1485 1481 return 1
1486 1482 finally:
1487 1483 release(dsguard, lock, wlock)
1488 1484
1489 1485 @unfilteredmethod # Until we get smarter cache management
1490 1486 def _rollback(self, dryrun, force, dsguard):
1491 1487 ui = self.ui
1492 1488 try:
1493 1489 args = self.vfs.read('undo.desc').splitlines()
1494 1490 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1495 1491 if len(args) >= 3:
1496 1492 detail = args[2]
1497 1493 oldtip = oldlen - 1
1498 1494
1499 1495 if detail and ui.verbose:
1500 1496 msg = (_('repository tip rolled back to revision %d'
1501 1497 ' (undo %s: %s)\n')
1502 1498 % (oldtip, desc, detail))
1503 1499 else:
1504 1500 msg = (_('repository tip rolled back to revision %d'
1505 1501 ' (undo %s)\n')
1506 1502 % (oldtip, desc))
1507 1503 except IOError:
1508 1504 msg = _('rolling back unknown transaction\n')
1509 1505 desc = None
1510 1506
1511 1507 if not force and self['.'] != self['tip'] and desc == 'commit':
1512 1508 raise error.Abort(
1513 1509 _('rollback of last commit while not checked out '
1514 1510 'may lose data'), hint=_('use -f to force'))
1515 1511
1516 1512 ui.status(msg)
1517 1513 if dryrun:
1518 1514 return 0
1519 1515
1520 1516 parents = self.dirstate.parents()
1521 1517 self.destroying()
1522 1518 vfsmap = {'plain': self.vfs, '': self.svfs}
1523 1519 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1524 1520 checkambigfiles=_cachedfiles)
1525 1521 if self.vfs.exists('undo.bookmarks'):
1526 1522 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1527 1523 if self.svfs.exists('undo.phaseroots'):
1528 1524 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1529 1525 self.invalidate()
1530 1526
1531 1527 parentgone = (parents[0] not in self.changelog.nodemap or
1532 1528 parents[1] not in self.changelog.nodemap)
1533 1529 if parentgone:
1534 1530 # prevent dirstateguard from overwriting already restored one
1535 1531 dsguard.close()
1536 1532
1537 1533 self.dirstate.restorebackup(None, 'undo.dirstate')
1538 1534 try:
1539 1535 branch = self.vfs.read('undo.branch')
1540 1536 self.dirstate.setbranch(encoding.tolocal(branch))
1541 1537 except IOError:
1542 1538 ui.warn(_('named branch could not be reset: '
1543 1539 'current branch is still \'%s\'\n')
1544 1540 % self.dirstate.branch())
1545 1541
1546 1542 parents = tuple([p.rev() for p in self[None].parents()])
1547 1543 if len(parents) > 1:
1548 1544 ui.status(_('working directory now based on '
1549 1545 'revisions %d and %d\n') % parents)
1550 1546 else:
1551 1547 ui.status(_('working directory now based on '
1552 1548 'revision %d\n') % parents)
1553 1549 mergemod.mergestate.clean(self, self['.'].node())
1554 1550
1555 1551 # TODO: if we know which new heads may result from this rollback, pass
1556 1552 # them to destroy(), which will prevent the branchhead cache from being
1557 1553 # invalidated.
1558 1554 self.destroyed()
1559 1555 return 0
1560 1556
1561 1557 def _buildcacheupdater(self, newtransaction):
1562 1558 """called during transaction to build the callback updating cache
1563 1559
1564 1560 Lives on the repository to help extension who might want to augment
1565 1561 this logic. For this purpose, the created transaction is passed to the
1566 1562 method.
1567 1563 """
1568 1564 # we must avoid cyclic reference between repo and transaction.
1569 1565 reporef = weakref.ref(self)
1570 1566 def updater(tr):
1571 1567 repo = reporef()
1572 1568 repo.updatecaches(tr)
1573 1569 return updater
1574 1570
1575 1571 @unfilteredmethod
1576 1572 def updatecaches(self, tr=None, full=False):
1577 1573 """warm appropriate caches
1578 1574
1579 1575 If this function is called after a transaction closed, the transaction
1580 1576 will be available in the 'tr' argument. This can be used to selectively
1581 1577 update caches relevant to the changes in that transaction.
1582 1578
1583 1579 If 'full' is set, make sure all caches the function knows about have
1584 1580 up-to-date data, even the ones usually loaded more lazily.
1585 1581 """
1586 1582 if tr is not None and tr.hookargs.get('source') == 'strip':
1587 1583 # During strip, many caches are invalid but
1588 1584 # later call to `destroyed` will refresh them.
1589 1585 return
1590 1586
1591 1587 if tr is None or tr.changes['revs']:
1592 1588 # updating the unfiltered branchmap should refresh all the others,
1593 1589 self.ui.debug('updating the branch cache\n')
1594 1590 branchmap.updatecache(self.filtered('served'))
1595 1591
1596 1592 if full:
1597 1593 rbc = self.revbranchcache()
1598 1594 for r in self.changelog:
1599 1595 rbc.branchinfo(r)
1600 1596 rbc.write()
1601 1597
1602 1598 def invalidatecaches(self):
1603 1599
1604 1600 if '_tagscache' in vars(self):
1605 1601 # can't use delattr on proxy
1606 1602 del self.__dict__['_tagscache']
1607 1603
1608 1604 self.unfiltered()._branchcaches.clear()
1609 1605 self.invalidatevolatilesets()
1610 1606 self._sparsesignaturecache.clear()
1611 1607
1612 1608 def invalidatevolatilesets(self):
1613 1609 self.filteredrevcache.clear()
1614 1610 obsolete.clearobscaches(self)
1615 1611
1616 1612 def invalidatedirstate(self):
1617 1613 '''Invalidates the dirstate, causing the next call to dirstate
1618 1614 to check if it was modified since the last time it was read,
1619 1615 rereading it if it has.
1620 1616
1621 1617 This is different from dirstate.invalidate() in that it doesn't always
1622 1618 reread the dirstate. Use dirstate.invalidate() if you want to
1623 1619 explicitly read the dirstate again (i.e. restoring it to a previous
1624 1620 known good state).'''
1625 1621 if hasunfilteredcache(self, 'dirstate'):
1626 1622 for k in self.dirstate._filecache:
1627 1623 try:
1628 1624 delattr(self.dirstate, k)
1629 1625 except AttributeError:
1630 1626 pass
1631 1627 delattr(self.unfiltered(), 'dirstate')
1632 1628
1633 1629 def invalidate(self, clearfilecache=False):
1634 1630 '''Invalidates both store and non-store parts other than dirstate
1635 1631
1636 1632 If a transaction is running, invalidation of store is omitted,
1637 1633 because discarding in-memory changes might cause inconsistency
1638 1634 (e.g. incomplete fncache causes unintentional failure, but
1639 1635 redundant one doesn't).
1640 1636 '''
1641 1637 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1642 1638 for k in list(self._filecache.keys()):
1643 1639 # dirstate is invalidated separately in invalidatedirstate()
1644 1640 if k == 'dirstate':
1645 1641 continue
1646 1642 if (k == 'changelog' and
1647 1643 self.currenttransaction() and
1648 1644 self.changelog._delayed):
1649 1645 # The changelog object may store unwritten revisions. We don't
1650 1646 # want to lose them.
1651 1647 # TODO: Solve the problem instead of working around it.
1652 1648 continue
1653 1649
1654 1650 if clearfilecache:
1655 1651 del self._filecache[k]
1656 1652 try:
1657 1653 delattr(unfiltered, k)
1658 1654 except AttributeError:
1659 1655 pass
1660 1656 self.invalidatecaches()
1661 1657 if not self.currenttransaction():
1662 1658 # TODO: Changing contents of store outside transaction
1663 1659 # causes inconsistency. We should make in-memory store
1664 1660 # changes detectable, and abort if changed.
1665 1661 self.store.invalidatecaches()
1666 1662
1667 1663 def invalidateall(self):
1668 1664 '''Fully invalidates both store and non-store parts, causing the
1669 1665 subsequent operation to reread any outside changes.'''
1670 1666 # extension should hook this to invalidate its caches
1671 1667 self.invalidate()
1672 1668 self.invalidatedirstate()
1673 1669
1674 1670 @unfilteredmethod
1675 1671 def _refreshfilecachestats(self, tr):
1676 1672 """Reload stats of cached files so that they are flagged as valid"""
1677 1673 for k, ce in self._filecache.items():
1678 1674 k = pycompat.sysstr(k)
1679 1675 if k == r'dirstate' or k not in self.__dict__:
1680 1676 continue
1681 1677 ce.refresh()
1682 1678
1683 1679 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1684 1680 inheritchecker=None, parentenvvar=None):
1685 1681 parentlock = None
1686 1682 # the contents of parentenvvar are used by the underlying lock to
1687 1683 # determine whether it can be inherited
1688 1684 if parentenvvar is not None:
1689 1685 parentlock = encoding.environ.get(parentenvvar)
1690 1686
1691 1687 timeout = 0
1692 1688 warntimeout = 0
1693 1689 if wait:
1694 1690 timeout = self.ui.configint("ui", "timeout")
1695 1691 warntimeout = self.ui.configint("ui", "timeout.warn")
1696 1692
1697 1693 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1698 1694 releasefn=releasefn,
1699 1695 acquirefn=acquirefn, desc=desc,
1700 1696 inheritchecker=inheritchecker,
1701 1697 parentlock=parentlock)
1702 1698 return l
1703 1699
1704 1700 def _afterlock(self, callback):
1705 1701 """add a callback to be run when the repository is fully unlocked
1706 1702
1707 1703 The callback will be executed when the outermost lock is released
1708 1704 (with wlock being higher level than 'lock')."""
1709 1705 for ref in (self._wlockref, self._lockref):
1710 1706 l = ref and ref()
1711 1707 if l and l.held:
1712 1708 l.postrelease.append(callback)
1713 1709 break
1714 1710 else: # no lock has been found.
1715 1711 callback()
1716 1712
1717 1713 def lock(self, wait=True):
1718 1714 '''Lock the repository store (.hg/store) and return a weak reference
1719 1715 to the lock. Use this before modifying the store (e.g. committing or
1720 1716 stripping). If you are opening a transaction, get a lock as well.
1721 1717
1722 1718 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1723 1719 'wlock' first to avoid a dead-lock hazard.'''
1724 1720 l = self._currentlock(self._lockref)
1725 1721 if l is not None:
1726 1722 l.lock()
1727 1723 return l
1728 1724
1729 1725 l = self._lock(self.svfs, "lock", wait, None,
1730 1726 self.invalidate, _('repository %s') % self.origroot)
1731 1727 self._lockref = weakref.ref(l)
1732 1728 return l
1733 1729
1734 1730 def _wlockchecktransaction(self):
1735 1731 if self.currenttransaction() is not None:
1736 1732 raise error.LockInheritanceContractViolation(
1737 1733 'wlock cannot be inherited in the middle of a transaction')
1738 1734
1739 1735 def wlock(self, wait=True):
1740 1736 '''Lock the non-store parts of the repository (everything under
1741 1737 .hg except .hg/store) and return a weak reference to the lock.
1742 1738
1743 1739 Use this before modifying files in .hg.
1744 1740
1745 1741 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1746 1742 'wlock' first to avoid a dead-lock hazard.'''
1747 1743 l = self._wlockref and self._wlockref()
1748 1744 if l is not None and l.held:
1749 1745 l.lock()
1750 1746 return l
1751 1747
1752 1748 # We do not need to check for non-waiting lock acquisition. Such
1753 1749 # acquisition would not cause dead-lock as they would just fail.
1754 1750 if wait and (self.ui.configbool('devel', 'all-warnings')
1755 1751 or self.ui.configbool('devel', 'check-locks')):
1756 1752 if self._currentlock(self._lockref) is not None:
1757 1753 self.ui.develwarn('"wlock" acquired after "lock"')
1758 1754
1759 1755 def unlock():
1760 1756 if self.dirstate.pendingparentchange():
1761 1757 self.dirstate.invalidate()
1762 1758 else:
1763 1759 self.dirstate.write(None)
1764 1760
1765 1761 self._filecache['dirstate'].refresh()
1766 1762
1767 1763 l = self._lock(self.vfs, "wlock", wait, unlock,
1768 1764 self.invalidatedirstate, _('working directory of %s') %
1769 1765 self.origroot,
1770 1766 inheritchecker=self._wlockchecktransaction,
1771 1767 parentenvvar='HG_WLOCK_LOCKER')
1772 1768 self._wlockref = weakref.ref(l)
1773 1769 return l
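# Lock-ordering sketch: when both locks are needed, acquire 'wlock' first
# (a minimal sketch; calling release() on the returned locks works too):
#
#   with repo.wlock():
#       with repo.lock():
#           ...  # safe to touch both .hg and .hg/store here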
1774 1770
1775 1771 def _currentlock(self, lockref):
1776 1772 """Returns the lock if it's held, or None if it's not."""
1777 1773 if lockref is None:
1778 1774 return None
1779 1775 l = lockref()
1780 1776 if l is None or not l.held:
1781 1777 return None
1782 1778 return l
1783 1779
1784 1780 def currentwlock(self):
1785 1781 """Returns the wlock if it's held, or None if it's not."""
1786 1782 return self._currentlock(self._wlockref)
1787 1783
1788 1784 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1789 1785 """
1790 1786 commit an individual file as part of a larger transaction
1791 1787 """
1792 1788
1793 1789 fname = fctx.path()
1794 1790 fparent1 = manifest1.get(fname, nullid)
1795 1791 fparent2 = manifest2.get(fname, nullid)
1796 1792 if isinstance(fctx, context.filectx):
1797 1793 node = fctx.filenode()
1798 1794 if node in [fparent1, fparent2]:
1799 1795 self.ui.debug('reusing %s filelog entry\n' % fname)
1800 1796 if manifest1.flags(fname) != fctx.flags():
1801 1797 changelist.append(fname)
1802 1798 return node
1803 1799
1804 1800 flog = self.file(fname)
1805 1801 meta = {}
1806 1802 copy = fctx.renamed()
1807 1803 if copy and copy[0] != fname:
1808 1804 # Mark the new revision of this file as a copy of another
1809 1805 # file. This copy data will effectively act as a parent
1810 1806 # of this new revision. If this is a merge, the first
1811 1807 # parent will be the nullid (meaning "look up the copy data")
1812 1808 # and the second one will be the other parent. For example:
1813 1809 #
1814 1810 # 0 --- 1 --- 3 rev1 changes file foo
1815 1811 # \ / rev2 renames foo to bar and changes it
1816 1812 # \- 2 -/ rev3 should have bar with all changes and
1817 1813 # should record that bar descends from
1818 1814 # bar in rev2 and foo in rev1
1819 1815 #
1820 1816 # this allows this merge to succeed:
1821 1817 #
1822 1818 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1823 1819 # \ / merging rev3 and rev4 should use bar@rev2
1824 1820 # \- 2 --- 4 as the merge base
1825 1821 #
1826 1822
1827 1823 cfname = copy[0]
1828 1824 crev = manifest1.get(cfname)
1829 1825 newfparent = fparent2
1830 1826
1831 1827 if manifest2: # branch merge
1832 1828 if fparent2 == nullid or crev is None: # copied on remote side
1833 1829 if cfname in manifest2:
1834 1830 crev = manifest2[cfname]
1835 1831 newfparent = fparent1
1836 1832
1837 1833 # Here, we used to search backwards through history to try to find
1838 1834 # where the file copy came from if the source of a copy was not in
1839 1835 # the parent directory. However, this doesn't actually make sense to
1840 1836 # do (what does a copy from something not in your working copy even
1841 1837 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1842 1838 # the user that copy information was dropped, so if they didn't
1843 1839 # expect this outcome it can be fixed, but this is the correct
1844 1840 # behavior in this circumstance.
1845 1841
1846 1842 if crev:
1847 1843 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1848 1844 meta["copy"] = cfname
1849 1845 meta["copyrev"] = hex(crev)
1850 1846 fparent1, fparent2 = nullid, newfparent
1851 1847 else:
1852 1848 self.ui.warn(_("warning: can't find ancestor for '%s' "
1853 1849 "copied from '%s'!\n") % (fname, cfname))
1854 1850
1855 1851 elif fparent1 == nullid:
1856 1852 fparent1, fparent2 = fparent2, nullid
1857 1853 elif fparent2 != nullid:
1858 1854 # is one parent an ancestor of the other?
1859 1855 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1860 1856 if fparent1 in fparentancestors:
1861 1857 fparent1, fparent2 = fparent2, nullid
1862 1858 elif fparent2 in fparentancestors:
1863 1859 fparent2 = nullid
1864 1860
1865 1861 # is the file changed?
1866 1862 text = fctx.data()
1867 1863 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1868 1864 changelist.append(fname)
1869 1865 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1870 1866 # are just the flags changed during merge?
1871 1867 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1872 1868 changelist.append(fname)
1873 1869
1874 1870 return fparent1
1875 1871
1876 1872 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1877 1873 """check for commit arguments that aren't committable"""
1878 1874 if match.isexact() or match.prefix():
1879 1875 matched = set(status.modified + status.added + status.removed)
1880 1876
1881 1877 for f in match.files():
1882 1878 f = self.dirstate.normalize(f)
1883 1879 if f == '.' or f in matched or f in wctx.substate:
1884 1880 continue
1885 1881 if f in status.deleted:
1886 1882 fail(f, _('file not found!'))
1887 1883 if f in vdirs: # visited directory
1888 1884 d = f + '/'
1889 1885 for mf in matched:
1890 1886 if mf.startswith(d):
1891 1887 break
1892 1888 else:
1893 1889 fail(f, _("no match under directory!"))
1894 1890 elif f not in self.dirstate:
1895 1891 fail(f, _("file not tracked!"))
1896 1892
1897 1893 @unfilteredmethod
1898 1894 def commit(self, text="", user=None, date=None, match=None, force=False,
1899 1895 editor=False, extra=None):
1900 1896 """Add a new revision to current repository.
1901 1897
1902 1898 Revision information is gathered from the working directory,
1903 1899 match can be used to filter the committed files. If editor is
1904 1900 supplied, it is called to get a commit message.
1905 1901 """
1906 1902 if extra is None:
1907 1903 extra = {}
1908 1904
1909 1905 def fail(f, msg):
1910 1906 raise error.Abort('%s: %s' % (f, msg))
1911 1907
1912 1908 if not match:
1913 1909 match = matchmod.always(self.root, '')
1914 1910
1915 1911 if not force:
1916 1912 vdirs = []
1917 1913 match.explicitdir = vdirs.append
1918 1914 match.bad = fail
1919 1915
1920 1916 wlock = lock = tr = None
1921 1917 try:
1922 1918 wlock = self.wlock()
1923 1919 lock = self.lock() # for recent changelog (see issue4368)
1924 1920
1925 1921 wctx = self[None]
1926 1922 merge = len(wctx.parents()) > 1
1927 1923
1928 1924 if not force and merge and not match.always():
1929 1925 raise error.Abort(_('cannot partially commit a merge '
1930 1926 '(do not specify files or patterns)'))
1931 1927
1932 1928 status = self.status(match=match, clean=force)
1933 1929 if force:
1934 1930 status.modified.extend(status.clean) # mq may commit clean files
1935 1931
1936 1932 # check subrepos
1937 1933 subs, commitsubs, newstate = subrepoutil.precommit(
1938 1934 self.ui, wctx, status, match, force=force)
1939 1935
1940 1936 # make sure all explicit patterns are matched
1941 1937 if not force:
1942 1938 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1943 1939
1944 1940 cctx = context.workingcommitctx(self, status,
1945 1941 text, user, date, extra)
1946 1942
1947 1943 # internal config: ui.allowemptycommit
1948 1944 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1949 1945 or extra.get('close') or merge or cctx.files()
1950 1946 or self.ui.configbool('ui', 'allowemptycommit'))
1951 1947 if not allowemptycommit:
1952 1948 return None
1953 1949
1954 1950 if merge and cctx.deleted():
1955 1951 raise error.Abort(_("cannot commit merge with missing files"))
1956 1952
1957 1953 ms = mergemod.mergestate.read(self)
1958 1954 mergeutil.checkunresolved(ms)
1959 1955
1960 1956 if editor:
1961 1957 cctx._text = editor(self, cctx, subs)
1962 1958 edited = (text != cctx._text)
1963 1959
1964 1960 # Save commit message in case this transaction gets rolled back
1965 1961 # (e.g. by a pretxncommit hook). Leave the content alone on
1966 1962 # the assumption that the user will use the same editor again.
1967 1963 msgfn = self.savecommitmessage(cctx._text)
1968 1964
1969 1965 # commit subs and write new state
1970 1966 if subs:
1971 1967 for s in sorted(commitsubs):
1972 1968 sub = wctx.sub(s)
1973 1969 self.ui.status(_('committing subrepository %s\n') %
1974 1970 subrepoutil.subrelpath(sub))
1975 1971 sr = sub.commit(cctx._text, user, date)
1976 1972 newstate[s] = (newstate[s][0], sr)
1977 1973 subrepoutil.writestate(self, newstate)
1978 1974
1979 1975 p1, p2 = self.dirstate.parents()
1980 1976 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1981 1977 try:
1982 1978 self.hook("precommit", throw=True, parent1=hookp1,
1983 1979 parent2=hookp2)
1984 1980 tr = self.transaction('commit')
1985 1981 ret = self.commitctx(cctx, True)
1986 1982 except: # re-raises
1987 1983 if edited:
1988 1984 self.ui.write(
1989 1985 _('note: commit message saved in %s\n') % msgfn)
1990 1986 raise
1991 1987 # update bookmarks, dirstate and mergestate
1992 1988 bookmarks.update(self, [p1, p2], ret)
1993 1989 cctx.markcommitted(ret)
1994 1990 ms.reset()
1995 1991 tr.close()
1996 1992
1997 1993 finally:
1998 1994 lockmod.release(tr, lock, wlock)
1999 1995
2000 1996 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2001 1997 # hack for commands that use a temporary commit (eg: histedit):
2002 1998 # the temporary commit may have been stripped before the hook runs
2003 1999 if self.changelog.hasnode(ret):
2004 2000 self.hook("commit", node=node, parent1=parent1,
2005 2001 parent2=parent2)
2006 2002 self._afterlock(commithook)
2007 2003 return ret
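# Illustrative sketch of a programmatic commit (assumes the working
# directory already contains the changes to record):
#
#   node = repo.commit(text='example message', user='alice <a@example.org>')
#   if node is None:
#       repo.ui.status('nothing changed\n')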
2008 2004
2009 2005 @unfilteredmethod
2010 2006 def commitctx(self, ctx, error=False):
2011 2007 """Add a new revision to current repository.
2012 2008 Revision information is passed via the context argument.
2013 2009 """
2014 2010
2015 2011 tr = None
2016 2012 p1, p2 = ctx.p1(), ctx.p2()
2017 2013 user = ctx.user()
2018 2014
2019 2015 lock = self.lock()
2020 2016 try:
2021 2017 tr = self.transaction("commit")
2022 2018 trp = weakref.proxy(tr)
2023 2019
2024 2020 if ctx.manifestnode():
2025 2021 # reuse an existing manifest revision
2026 2022 mn = ctx.manifestnode()
2027 2023 files = ctx.files()
2028 2024 elif ctx.files():
2029 2025 m1ctx = p1.manifestctx()
2030 2026 m2ctx = p2.manifestctx()
2031 2027 mctx = m1ctx.copy()
2032 2028
2033 2029 m = mctx.read()
2034 2030 m1 = m1ctx.read()
2035 2031 m2 = m2ctx.read()
2036 2032
2037 2033 # check in files
2038 2034 added = []
2039 2035 changed = []
2040 2036 removed = list(ctx.removed())
2041 2037 linkrev = len(self)
2042 2038 self.ui.note(_("committing files:\n"))
2043 2039 for f in sorted(ctx.modified() + ctx.added()):
2044 2040 self.ui.note(f + "\n")
2045 2041 try:
2046 2042 fctx = ctx[f]
2047 2043 if fctx is None:
2048 2044 removed.append(f)
2049 2045 else:
2050 2046 added.append(f)
2051 2047 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2052 2048 trp, changed)
2053 2049 m.setflag(f, fctx.flags())
2054 2050 except OSError as inst:
2055 2051 self.ui.warn(_("trouble committing %s!\n") % f)
2056 2052 raise
2057 2053 except IOError as inst:
2058 2054 errcode = getattr(inst, 'errno', errno.ENOENT)
2059 2055 if error or errcode and errcode != errno.ENOENT:
2060 2056 self.ui.warn(_("trouble committing %s!\n") % f)
2061 2057 raise
2062 2058
2063 2059 # update manifest
2064 2060 self.ui.note(_("committing manifest\n"))
2065 2061 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2066 2062 drop = [f for f in removed if f in m]
2067 2063 for f in drop:
2068 2064 del m[f]
2069 2065 mn = mctx.write(trp, linkrev,
2070 2066 p1.manifestnode(), p2.manifestnode(),
2071 2067 added, drop)
2072 2068 files = changed + removed
2073 2069 else:
2074 2070 mn = p1.manifestnode()
2075 2071 files = []
2076 2072
2077 2073 # update changelog
2078 2074 self.ui.note(_("committing changelog\n"))
2079 2075 self.changelog.delayupdate(tr)
2080 2076 n = self.changelog.add(mn, files, ctx.description(),
2081 2077 trp, p1.node(), p2.node(),
2082 2078 user, ctx.date(), ctx.extra().copy())
2083 2079 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2084 2080 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2085 2081 parent2=xp2)
2086 2082 # set the new commit in its proper phase
2087 2083 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2088 2084 if targetphase:
2089 2085 # retracting the boundary does not alter the parent changeset.
2090 2086 # if a parent has a higher phase, the resulting phase will
2091 2087 # be compliant anyway
2092 2088 #
2093 2089 # if minimal phase was 0 we don't need to retract anything
2094 2090 phases.registernew(self, tr, targetphase, [n])
2095 2091 tr.close()
2096 2092 return n
2097 2093 finally:
2098 2094 if tr:
2099 2095 tr.release()
2100 2096 lock.release()
2101 2097
2102 2098 @unfilteredmethod
2103 2099 def destroying(self):
2104 2100 '''Inform the repository that nodes are about to be destroyed.
2105 2101 Intended for use by strip and rollback, so there's a common
2106 2102 place for anything that has to be done before destroying history.
2107 2103
2108 2104 This is mostly useful for saving state that is in memory and waiting
2109 2105 to be flushed when the current lock is released. Because a call to
2110 2106 destroyed is imminent, the repo will be invalidated causing those
2111 2107 changes to stay in memory (waiting for the next unlock), or vanish
2112 2108 completely.
2113 2109 '''
2114 2110 # When using the same lock to commit and strip, the phasecache is left
2115 2111 # dirty after committing. Then when we strip, the repo is invalidated,
2116 2112 # causing those changes to disappear.
2117 2113 if '_phasecache' in vars(self):
2118 2114 self._phasecache.write()
2119 2115
2120 2116 @unfilteredmethod
2121 2117 def destroyed(self):
2122 2118 '''Inform the repository that nodes have been destroyed.
2123 2119 Intended for use by strip and rollback, so there's a common
2124 2120 place for anything that has to be done after destroying history.
2125 2121 '''
2126 2122 # When one tries to:
2127 2123 # 1) destroy nodes thus calling this method (e.g. strip)
2128 2124 # 2) use phasecache somewhere (e.g. commit)
2129 2125 #
2130 2126 # then 2) will fail because the phasecache contains nodes that were
2131 2127 # removed. We can either remove phasecache from the filecache,
2132 2128 # causing it to reload next time it is accessed, or simply filter
2133 2129 # the removed nodes now and write the updated cache.
2134 2130 self._phasecache.filterunknown(self)
2135 2131 self._phasecache.write()
2136 2132
2137 2133 # refresh all repository caches
2138 2134 self.updatecaches()
2139 2135
2140 2136 # Ensure the persistent tag cache is updated. Doing it now
2141 2137 # means that the tag cache only has to worry about destroyed
2142 2138 # heads immediately after a strip/rollback. That in turn
2143 2139 # guarantees that "cachetip == currenttip" (comparing both rev
2144 2140 # and node) always means no nodes have been added or destroyed.
2145 2141
2146 2142 # XXX this is suboptimal when qrefresh'ing: we strip the current
2147 2143 # head, refresh the tag cache, then immediately add a new head.
2148 2144 # But I think doing it this way is necessary for the "instant
2149 2145 # tag cache retrieval" case to work.
2150 2146 self.invalidate()
2151 2147
2152 2148 def status(self, node1='.', node2=None, match=None,
2153 2149 ignored=False, clean=False, unknown=False,
2154 2150 listsubrepos=False):
2155 2151 '''a convenience method that calls node1.status(node2)'''
2156 2152 return self[node1].status(node2, match, ignored, clean, unknown,
2157 2153 listsubrepos)
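The returned status object exposes the familiar file lists as attributes. A brief usage sketch, assuming an existing repository in the current directory:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    st = repo.status()  # working directory vs. its first parent
    for f in st.modified:
        repo.ui.write('M %s\n' % f)
    for f in st.added:
        repo.ui.write('A %s\n' % f)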
2158 2154
2159 2155 def addpostdsstatus(self, ps):
2160 2156 """Add a callback to run within the wlock, at the point at which status
2161 2157 fixups happen.
2162 2158
2163 2159 On status completion, callback(wctx, status) will be called with the
2164 2160 wlock held, unless the dirstate has changed from underneath or the wlock
2165 2161 couldn't be grabbed.
2166 2162
2167 2163 Callbacks should not capture and use a cached copy of the dirstate --
2168 2164 it might change in the meantime. Instead, they should access the
2169 2165 dirstate via wctx.repo().dirstate.
2170 2166
2171 2167 This list is emptied out after each status run -- extensions should
2172 2168 make sure they add to this list each time dirstate.status is called.
2173 2169 Extensions should also make sure they don't call this for statuses
2174 2170 that don't involve the dirstate.
2175 2171 """
2176 2172
2177 2173 # The list is located here for uniqueness reasons -- it is actually
2178 2174 # managed by the workingctx, but that isn't unique per-repo.
2179 2175 self._postdsstatus.append(ps)
2180 2176
2181 2177 def postdsstatus(self):
2182 2178 """Used by workingctx to get the list of post-dirstate-status hooks."""
2183 2179 return self._postdsstatus
2184 2180
2185 2181 def clearpostdsstatus(self):
2186 2182 """Used by workingctx to clear post-dirstate-status hooks."""
2187 2183 del self._postdsstatus[:]
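A sketch of how an extension might use this machinery (the callback itself is hypothetical); per the docstring above, it must be re-registered before each status run and must go through wctx.repo().dirstate rather than a cached dirstate:

    def _poststatus(wctx, status):
        # runs under wlock, once status fixups happen
        repo = wctx.repo()
        repo.ui.debug('post-status: %d files modified\n'
                      % len(status.modified))

    # somewhere before dirstate.status is triggered:
    repo.addpostdsstatus(_poststatus)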
2188 2184
2189 2185 def heads(self, start=None):
2190 2186 if start is None:
2191 2187 cl = self.changelog
2192 2188 headrevs = reversed(cl.headrevs())
2193 2189 return [cl.node(rev) for rev in headrevs]
2194 2190
2195 2191 heads = self.changelog.heads(start)
2196 2192 # sort the output in rev descending order
2197 2193 return sorted(heads, key=self.changelog.rev, reverse=True)
2198 2194
2199 2195 def branchheads(self, branch=None, start=None, closed=False):
2200 2196 '''return a (possibly filtered) list of heads for the given branch
2201 2197
2202 2198 Heads are returned in topological order, from newest to oldest.
2203 2199 If branch is None, use the dirstate branch.
2204 2200 If start is not None, return only heads reachable from start.
2205 2201 If closed is True, return heads that are marked as closed as well.
2206 2202 '''
2207 2203 if branch is None:
2208 2204 branch = self[None].branch()
2209 2205 branches = self.branchmap()
2210 2206 if branch not in branches:
2211 2207 return []
2212 2208 # the cache returns heads ordered lowest to highest
2213 2209 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2214 2210 if start is not None:
2215 2211 # filter out the heads that cannot be reached from startrev
2216 2212 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2217 2213 bheads = [h for h in bheads if h in fbheads]
2218 2214 return bheads
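Usage sketch, reusing the repo object from the earlier status example; nodes come back newest first:

    for node in repo.branchheads('default', closed=True):
        repo.ui.write('%s\n' % repo[node].hex())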
2219 2215
2220 2216 def branches(self, nodes):
2221 2217 if not nodes:
2222 2218 nodes = [self.changelog.tip()]
2223 2219 b = []
2224 2220 for n in nodes:
2225 2221 t = n
2226 2222 while True:
2227 2223 p = self.changelog.parents(n)
2228 2224 if p[1] != nullid or p[0] == nullid:
2229 2225 b.append((t, n, p[0], p[1]))
2230 2226 break
2231 2227 n = p[0]
2232 2228 return b
2233 2229
2234 2230 def between(self, pairs):
2235 2231 r = []
2236 2232
2237 2233 for top, bottom in pairs:
2238 2234 n, l, i = top, [], 0
2239 2235 f = 1
2240 2236
2241 2237 while n != bottom and n != nullid:
2242 2238 p = self.changelog.parents(n)[0]
2243 2239 if i == f:
2244 2240 l.append(n)
2245 2241 f = f * 2
2246 2242 n = p
2247 2243 i += 1
2248 2244
2249 2245 r.append(l)
2250 2246
2251 2247 return r
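between() walks the first-parent chain from each top toward bottom, keeping the nodes whose distance from top is a power of two (1, 2, 4, 8, ...); these exponentially spaced samples let the old discovery protocol narrow down common ancestors. The same sampling on a plain list, as a self-contained illustration:

    def sample(chain):
        # chain[0] is "top"; keep entries at distances 1, 2, 4, 8, ...
        l, f = [], 1
        for i, n in enumerate(chain):
            if i == f:
                l.append(n)
                f *= 2
        return l

    assert sample(list(range(10))) == [1, 2, 4, 8]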
2252 2248
2253 2249 def checkpush(self, pushop):
2254 2250 """Extensions can override this function if additional checks have
2255 2251 to be performed before pushing, or call it if they override push
2256 2252 command.
2257 2253 """
2258 2254
2259 2255 @unfilteredpropertycache
2260 2256 def prepushoutgoinghooks(self):
2261 2257 """Return util.hooks consists of a pushop with repo, remote, outgoing
2262 2258 methods, which are called before pushing changesets.
2263 2259 """
2264 2260 return util.hooks()
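Each registered hook is later called with the pushop, whose repo, remote and outgoing attributes describe the pending push. A hedged sketch of how an extension might register one (the 'myext' source name and the size check are invented for the example):

    def _checkpush(pushop):
        missing = pushop.outgoing.missing
        if len(missing) > 100:
            pushop.repo.ui.warn('pushing %d changesets\n' % len(missing))

    def reposetup(ui, repo):
        repo.prepushoutgoinghooks.add('myext', _checkpush)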
2265 2261
2266 2262 def pushkey(self, namespace, key, old, new):
2267 2263 try:
2268 2264 tr = self.currenttransaction()
2269 2265 hookargs = {}
2270 2266 if tr is not None:
2271 2267 hookargs.update(tr.hookargs)
2272 2268 hookargs = pycompat.strkwargs(hookargs)
2273 2269 hookargs[r'namespace'] = namespace
2274 2270 hookargs[r'key'] = key
2275 2271 hookargs[r'old'] = old
2276 2272 hookargs[r'new'] = new
2277 2273 self.hook('prepushkey', throw=True, **hookargs)
2278 2274 except error.HookAbort as exc:
2279 2275 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2280 2276 if exc.hint:
2281 2277 self.ui.write_err(_("(%s)\n") % exc.hint)
2282 2278 return False
2283 2279 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2284 2280 ret = pushkey.push(self, namespace, key, old, new)
2285 2281 def runhook():
2286 2282 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2287 2283 ret=ret)
2288 2284 self._afterlock(runhook)
2289 2285 return ret
2290 2286
2291 2287 def listkeys(self, namespace):
2292 2288 self.hook('prelistkeys', throw=True, namespace=namespace)
2293 2289 self.ui.debug('listing keys for "%s"\n' % namespace)
2294 2290 values = pushkey.list(self, namespace)
2295 2291 self.hook('listkeys', namespace=namespace, values=values)
2296 2292 return values
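pushkey() and listkeys() back the generic key/value wire protocol; 'bookmarks' and 'phases' are the stock namespaces. A usage sketch, again reusing the repo object from the status example:

    marks = repo.listkeys('bookmarks')  # {name: hex nodeid}
    for name, hexnode in sorted(marks.items()):
        repo.ui.write('%s -> %s\n' % (name, hexnode))

    # move (or create) a bookmark; an empty old value means "not set yet"
    ok = repo.pushkey('bookmarks', 'stable', '', repo['tip'].hex())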
2297 2293
2298 2294 def debugwireargs(self, one, two, three=None, four=None, five=None):
2299 2295 '''used to test argument passing over the wire'''
2300 2296 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2301 2297 pycompat.bytestr(four),
2302 2298 pycompat.bytestr(five))
2303 2299
2304 2300 def savecommitmessage(self, text):
2305 2301 fp = self.vfs('last-message.txt', 'wb')
2306 2302 try:
2307 2303 fp.write(text)
2308 2304 finally:
2309 2305 fp.close()
2310 2306 return self.pathto(fp.name[len(self.root) + 1:])
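This is what leaves .hg/last-message.txt behind when a commit is aborted, so the text can be recovered later. A one-line usage sketch:

    relpath = repo.savecommitmessage('draft message\n')
    repo.ui.status('message saved to %s\n' % relpath)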
2311 2307
2312 2308 # used to avoid circular references so destructors work
2313 2309 def aftertrans(files):
2314 2310 renamefiles = [tuple(t) for t in files]
2315 2311 def a():
2316 2312 for vfs, src, dest in renamefiles:
2317 2313 # if src and dest refer to a same file, vfs.rename is a no-op,
2318 2314 # leaving both src and dest on disk. delete dest to make sure
2319 2315 # the rename couldn't be such a no-op.
2320 2316 vfs.tryunlink(dest)
2321 2317 try:
2322 2318 vfs.rename(src, dest)
2323 2319 except OSError: # journal file does not yet exist
2324 2320 pass
2325 2321 return a
2326 2322
2327 2323 def undoname(fn):
2328 2324 base, name = os.path.split(fn)
2329 2325 assert name.startswith('journal')
2330 2326 return os.path.join(base, name.replace('journal', 'undo', 1))
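undoname() simply maps a journal file to its undo counterpart, e.g. (POSIX path shown):

    assert undoname('.hg/journal.bookmarks') == '.hg/undo.bookmarks'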
2331 2327
2332 2328 def instance(ui, path, create, intents=None):
2333 2329 return localrepository(ui, util.urllocalpath(path), create,
2334 2330 intents=intents)
2335 2331
2336 2332 def islocal(path):
2337 2333 return True
2338 2334
2339 2335 def newreporequirements(repo):
2340 2336 """Determine the set of requirements for a new local repository.
2341 2337
2342 2338 Extensions can wrap this function to specify custom requirements for
2343 2339 new repositories.
2344 2340 """
2345 2341 ui = repo.ui
2346 2342 requirements = {'revlogv1'}
2347 2343 if ui.configbool('format', 'usestore'):
2348 2344 requirements.add('store')
2349 2345 if ui.configbool('format', 'usefncache'):
2350 2346 requirements.add('fncache')
2351 2347 if ui.configbool('format', 'dotencode'):
2352 2348 requirements.add('dotencode')
2353 2349
2354 2350 compengine = ui.config('experimental', 'format.compression')
2355 2351 if compengine not in util.compengines:
2356 2352 raise error.Abort(_('compression engine %s defined by '
2357 2353 'experimental.format.compression not available') %
2358 2354 compengine,
2359 2355 hint=_('run "hg debuginstall" to list available '
2360 2356 'compression engines'))
2361 2357
2362 2358 # zlib is the historical default and doesn't need an explicit requirement.
2363 2359 if compengine != 'zlib':
2364 2360 requirements.add('exp-compression-%s' % compengine)
2365 2361
2366 2362 if scmutil.gdinitconfig(ui):
2367 2363 requirements.add('generaldelta')
2368 2364 if ui.configbool('experimental', 'treemanifest'):
2369 2365 requirements.add('treemanifest')
2370 2366
2371 2367 revlogv2 = ui.config('experimental', 'revlogv2')
2372 2368 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2373 2369 requirements.remove('revlogv1')
2374 2370 # generaldelta is implied by revlogv2.
2375 2371 requirements.discard('generaldelta')
2376 2372 requirements.add(REVLOGV2_REQUIREMENT)
2377 2373
2378 2374 return requirements
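Since this function is an advertised extension point, an extension can wrap it to inject its own requirement. A hedged sketch (the 'exp-myfeature' requirement and the 'myext' config knob are made up for the example):

    from mercurial import extensions, localrepo

    def _wrapreqs(orig, repo):
        reqs = orig(repo)
        if repo.ui.configbool('myext', 'enabled'):
            reqs.add('exp-myfeature')
        return reqs

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements', _wrapreqs)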