py3: replace os.environ with encoding.environ (part 1 of 5)...
Pulkit Goyal
r30634:ad15646d default
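Context for the change: on Python 2, os.environ maps bytes to bytes, while on Python 3 it maps str to str. Mercurial handles paths, config values and environment data as bytes internally, so call sites move from os.environ to encoding.environ, a bytes-keyed view of the environment. A minimal sketch of the idea, with hypothetical simplified code (Mercurial's real encoding.environ lives in mercurial/encoding.py and differs in detail):

    import os
    import sys

    if sys.version_info[0] >= 3:
        # Snapshot the environment with bytes keys and values so the
        # rest of the code can keep treating everything as bytes.
        environ = {os.fsencode(k): os.fsencode(v)
                   for k, v in os.environ.items()}
    else:
        # Python 2: os.environ already maps bytes to bytes.
        environ = os.environ

    # A call site such as _getbkfile() below then reads:
    if b'HG_PENDING' in environ:
        pass  # prefer the pending bookmarks file

On Python 2 the unprefixed literal 'HG_PENDING' is already a bytes string, which is why the diff below can leave the call sites' string literals unchanged.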
@@ -1,608 +1,607
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 import os
12 11
13 12 from .i18n import _
14 13 from .node import (
15 14 bin,
16 15 hex,
17 16 )
18 17 from . import (
19 18 encoding,
20 19 error,
21 20 lock as lockmod,
22 21 obsolete,
23 22 util,
24 23 )
25 24
26 25 def _getbkfile(repo):
27 26 """Hook so that extensions that mess with the store can hook bm storage.
28 27
29 28 For core, this just handles whether we should see pending
30 29 bookmarks or the committed ones. Other extensions (like share)
31 30 may need to tweak this behavior further.
32 31 """
33 32 bkfile = None
34 if 'HG_PENDING' in os.environ:
33 if 'HG_PENDING' in encoding.environ:
35 34 try:
36 35 bkfile = repo.vfs('bookmarks.pending')
37 36 except IOError as inst:
38 37 if inst.errno != errno.ENOENT:
39 38 raise
40 39 if bkfile is None:
41 40 bkfile = repo.vfs('bookmarks')
42 41 return bkfile
43 42
44 43
45 44 class bmstore(dict):
46 45 """Storage for bookmarks.
47 46
48 47 This object should do all bookmark-related reads and writes, so
49 48 that it's fairly simple to replace the storage underlying
50 49 bookmarks without having to clone the logic surrounding
51 50 bookmarks. This type also should manage the active bookmark, if
52 51 any.
53 52
54 53 This particular bmstore implementation stores bookmarks as
55 54 {hash}\s{name}\n (the same format as localtags) in
56 55 .hg/bookmarks. The mapping is stored as {name: nodeid}.
57 56 """
58 57
59 58 def __init__(self, repo):
60 59 dict.__init__(self)
61 60 self._repo = repo
62 61 try:
63 62 bkfile = _getbkfile(repo)
64 63 for line in bkfile:
65 64 line = line.strip()
66 65 if not line:
67 66 continue
68 67 if ' ' not in line:
69 68 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
70 69 % line)
71 70 continue
72 71 sha, refspec = line.split(' ', 1)
73 72 refspec = encoding.tolocal(refspec)
74 73 try:
75 74 self[refspec] = repo.changelog.lookup(sha)
76 75 except LookupError:
77 76 pass
78 77 except IOError as inst:
79 78 if inst.errno != errno.ENOENT:
80 79 raise
81 80 self._clean = True
82 81 self._active = _readactive(repo, self)
83 82 self._aclean = True
84 83
85 84 @property
86 85 def active(self):
87 86 return self._active
88 87
89 88 @active.setter
90 89 def active(self, mark):
91 90 if mark is not None and mark not in self:
92 91 raise AssertionError('bookmark %s does not exist!' % mark)
93 92
94 93 self._active = mark
95 94 self._aclean = False
96 95
97 96 def __setitem__(self, *args, **kwargs):
98 97 self._clean = False
99 98 return dict.__setitem__(self, *args, **kwargs)
100 99
101 100 def __delitem__(self, key):
102 101 self._clean = False
103 102 return dict.__delitem__(self, key)
104 103
105 104 def recordchange(self, tr):
106 105 """record that bookmarks have been changed in a transaction
107 106
108 107 The transaction is then responsible for updating the file content."""
109 108 tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
110 109 location='plain')
111 110 tr.hookargs['bookmark_moved'] = '1'
112 111
113 112 def _writerepo(self, repo):
114 113 """Factored out for extensibility"""
115 114 rbm = repo._bookmarks
116 115 if rbm.active not in self:
117 116 rbm.active = None
118 117 rbm._writeactive()
119 118
120 119 with repo.wlock():
121 120 file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
122 121 checkambig=True)
123 122 try:
124 123 self._write(file_)
125 124 except: # re-raises
126 125 file_.discard()
127 126 raise
128 127 finally:
129 128 file_.close()
130 129
131 130 def _writeactive(self):
132 131 if self._aclean:
133 132 return
134 133 with self._repo.wlock():
135 134 if self._active is not None:
136 135 f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
137 136 checkambig=True)
138 137 try:
139 138 f.write(encoding.fromlocal(self._active))
140 139 finally:
141 140 f.close()
142 141 else:
143 142 try:
144 143 self._repo.vfs.unlink('bookmarks.current')
145 144 except OSError as inst:
146 145 if inst.errno != errno.ENOENT:
147 146 raise
148 147 self._aclean = True
149 148
150 149 def _write(self, fp):
151 150 for name, node in self.iteritems():
152 151 fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
153 152 self._clean = True
154 153 self._repo.invalidatevolatilesets()
155 154
156 155 def expandname(self, bname):
157 156 if bname == '.':
158 157 if self.active:
159 158 return self.active
160 159 else:
161 160 raise error.Abort(_("no active bookmark"))
162 161 return bname
163 162
164 163 def _readactive(repo, marks):
165 164 """
166 165 Get the active bookmark. We can have an active bookmark that updates
167 166 itself as we commit. This function returns the name of that bookmark.
168 167 It is stored in .hg/bookmarks.current
169 168 """
170 169 mark = None
171 170 try:
172 171 file = repo.vfs('bookmarks.current')
173 172 except IOError as inst:
174 173 if inst.errno != errno.ENOENT:
175 174 raise
176 175 return None
177 176 try:
178 177 # No readline() in osutil.posixfile, reading everything is
179 178 # cheap.
180 179 # Note that it's possible for readlines() here to raise
181 180 # IOError, since we might be reading the active mark over
182 181 # static-http which only tries to load the file when we try
183 182 # to read from it.
184 183 mark = encoding.tolocal((file.readlines() or [''])[0])
185 184 if mark == '' or mark not in marks:
186 185 mark = None
187 186 except IOError as inst:
188 187 if inst.errno != errno.ENOENT:
189 188 raise
190 189 return None
191 190 finally:
192 191 file.close()
193 192 return mark
194 193
195 194 def activate(repo, mark):
196 195 """
197 196 Set the given bookmark to be 'active', meaning that this bookmark will
198 197 follow new commits that are made.
199 198 The name is recorded in .hg/bookmarks.current
200 199 """
201 200 repo._bookmarks.active = mark
202 201 repo._bookmarks._writeactive()
203 202
204 203 def deactivate(repo):
205 204 """
206 205 Unset the active bookmark in this repository.
207 206 """
208 207 repo._bookmarks.active = None
209 208 repo._bookmarks._writeactive()
210 209
211 210 def isactivewdirparent(repo):
212 211 """
213 212 Tell whether the 'active' bookmark (the one that follows new commits)
214 213 points to one of the parents of the current working directory (wdir).
215 214
216 215 While this is normally the case, it can on occasion be false; for example,
217 216 immediately after a pull, the active bookmark can be moved to point
218 217 to a place different from the wdir. This is solved by running `hg update`.
219 218 """
220 219 mark = repo._activebookmark
221 220 marks = repo._bookmarks
222 221 parents = [p.node() for p in repo[None].parents()]
223 222 return (mark in marks and marks[mark] in parents)
224 223
225 224 def deletedivergent(repo, deletefrom, bm):
226 225 '''Delete divergent versions of bm on nodes in deletefrom.
227 226
228 227 Return True if at least one bookmark was deleted, False otherwise.'''
229 228 deleted = False
230 229 marks = repo._bookmarks
231 230 divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
232 231 for mark in divergent:
233 232 if mark == '@' or '@' not in mark:
234 233 # can't be divergent by definition
235 234 continue
236 235 if mark and marks[mark] in deletefrom:
237 236 if mark != bm:
238 237 del marks[mark]
239 238 deleted = True
240 239 return deleted
241 240
242 241 def calculateupdate(ui, repo, checkout):
243 242 '''Return a tuple (targetrev, movemarkfrom) indicating the rev to
244 243 check out and where to move the active bookmark from, if needed.'''
245 244 movemarkfrom = None
246 245 if checkout is None:
247 246 activemark = repo._activebookmark
248 247 if isactivewdirparent(repo):
249 248 movemarkfrom = repo['.'].node()
250 249 elif activemark:
251 250 ui.status(_("updating to active bookmark %s\n") % activemark)
252 251 checkout = activemark
253 252 return (checkout, movemarkfrom)
254 253
255 254 def update(repo, parents, node):
256 255 deletefrom = parents
257 256 marks = repo._bookmarks
258 257 update = False
259 258 active = marks.active
260 259 if not active:
261 260 return False
262 261
263 262 if marks[active] in parents:
264 263 new = repo[node]
265 264 divs = [repo[b] for b in marks
266 265 if b.split('@', 1)[0] == active.split('@', 1)[0]]
267 266 anc = repo.changelog.ancestors([new.rev()])
268 267 deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
269 268 if validdest(repo, repo[marks[active]], new):
270 269 marks[active] = new.node()
271 270 update = True
272 271
273 272 if deletedivergent(repo, deletefrom, active):
274 273 update = True
275 274
276 275 if update:
277 276 lock = tr = None
278 277 try:
279 278 lock = repo.lock()
280 279 tr = repo.transaction('bookmark')
281 280 marks.recordchange(tr)
282 281 tr.close()
283 282 finally:
284 283 lockmod.release(tr, lock)
285 284 return update
286 285
287 286 def listbinbookmarks(repo):
288 287 # We may try to list bookmarks on a repo type that does not
289 288 # support it (e.g., statichttprepository).
290 289 marks = getattr(repo, '_bookmarks', {})
291 290
292 291 hasnode = repo.changelog.hasnode
293 292 for k, v in marks.iteritems():
294 293 # don't expose local divergent bookmarks
295 294 if hasnode(v) and ('@' not in k or k.endswith('@')):
296 295 yield k, v
297 296
298 297 def listbookmarks(repo):
299 298 d = {}
300 299 for book, node in listbinbookmarks(repo):
301 300 d[book] = hex(node)
302 301 return d
303 302
304 303 def pushbookmark(repo, key, old, new):
305 304 w = l = tr = None
306 305 try:
307 306 w = repo.wlock()
308 307 l = repo.lock()
309 308 tr = repo.transaction('bookmarks')
310 309 marks = repo._bookmarks
311 310 existing = hex(marks.get(key, ''))
312 311 if existing != old and existing != new:
313 312 return False
314 313 if new == '':
315 314 del marks[key]
316 315 else:
317 316 if new not in repo:
318 317 return False
319 318 marks[key] = repo[new].node()
320 319 marks.recordchange(tr)
321 320 tr.close()
322 321 return True
323 322 finally:
324 323 lockmod.release(tr, l, w)
325 324
326 325 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
327 326 '''Compare bookmarks between srcmarks and dstmarks
328 327
329 328 This returns a tuple "(addsrc, adddst, advsrc, advdst, diverge,
330 329 differ, invalid, same)", each being a list of bookmarks:
331 330
332 331 :addsrc: added on src side (removed on dst side, perhaps)
333 332 :adddst: added on dst side (removed on src side, perhaps)
334 333 :advsrc: advanced on src side
335 334 :advdst: advanced on dst side
336 335 :diverge: diverge
338 337 :differ: changed, but the changeset referred to on src is unknown on dst
339 338 :invalid: unknown on both sides
340 339 :same: same on both sides
340 339
341 340 Each element of the lists in the result tuple is a tuple "(bookmark
342 341 name, changeset ID on source side, changeset ID on destination
343 342 side)". Each changeset ID is a 40-digit hexadecimal string or
344 343 None.
345 344
346 345 Changeset IDs of tuples in "addsrc", "adddst", "differ" or
347 346 "invalid" list may be unknown for repo.
348 347
349 348 If "targets" is specified, only bookmarks listed in it are
350 349 examined.
351 350 '''
352 351
353 352 if targets:
354 353 bset = set(targets)
355 354 else:
356 355 srcmarkset = set(srcmarks)
357 356 dstmarkset = set(dstmarks)
358 357 bset = srcmarkset | dstmarkset
359 358
360 359 results = ([], [], [], [], [], [], [], [])
361 360 addsrc = results[0].append
362 361 adddst = results[1].append
363 362 advsrc = results[2].append
364 363 advdst = results[3].append
365 364 diverge = results[4].append
366 365 differ = results[5].append
367 366 invalid = results[6].append
368 367 same = results[7].append
369 368
370 369 for b in sorted(bset):
371 370 if b not in srcmarks:
372 371 if b in dstmarks:
373 372 adddst((b, None, dstmarks[b]))
374 373 else:
375 374 invalid((b, None, None))
376 375 elif b not in dstmarks:
377 376 addsrc((b, srcmarks[b], None))
378 377 else:
379 378 scid = srcmarks[b]
380 379 dcid = dstmarks[b]
381 380 if scid == dcid:
382 381 same((b, scid, dcid))
383 382 elif scid in repo and dcid in repo:
384 383 sctx = repo[scid]
385 384 dctx = repo[dcid]
386 385 if sctx.rev() < dctx.rev():
387 386 if validdest(repo, sctx, dctx):
388 387 advdst((b, scid, dcid))
389 388 else:
390 389 diverge((b, scid, dcid))
391 390 else:
392 391 if validdest(repo, dctx, sctx):
393 392 advsrc((b, scid, dcid))
394 393 else:
395 394 diverge((b, scid, dcid))
396 395 else:
397 396 # it is too expensive to examine in detail, in this case
398 397 differ((b, scid, dcid))
399 398
400 399 return results
401 400
402 401 def _diverge(ui, b, path, localmarks, remotenode):
403 402 '''Return appropriate diverged bookmark for specified ``path``
404 403
405 404 This returns None if it fails to assign any divergent
406 405 bookmark name.
407 406
408 407 This reuses an already existing one with an "@number" suffix, if
409 408 it refers to ``remotenode``.
410 409 '''
411 410 if b == '@':
412 411 b = ''
413 412 # try to use an @pathalias suffix
414 413 # if an @pathalias already exists, we overwrite (update) it
415 414 if path.startswith("file:"):
416 415 path = util.url(path).path
417 416 for p, u in ui.configitems("paths"):
418 417 if u.startswith("file:"):
419 418 u = util.url(u).path
420 419 if path == u:
421 420 return '%s@%s' % (b, p)
422 421
423 422 # assign a new, unique "@number" suffix
424 423 for x in range(1, 100):
425 424 n = '%s@%d' % (b, x)
426 425 if n not in localmarks or localmarks[n] == remotenode:
427 426 return n
428 427
429 428 return None
430 429
431 430 def unhexlifybookmarks(marks):
432 431 binremotemarks = {}
433 432 for name, node in marks.items():
434 433 binremotemarks[name] = bin(node)
435 434 return binremotemarks
436 435
437 436 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
438 437 ui.debug("checking for updated bookmarks\n")
439 438 localmarks = repo._bookmarks
440 439 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
441 440 ) = comparebookmarks(repo, remotemarks, localmarks)
442 441
443 442 status = ui.status
444 443 warn = ui.warn
445 444 if ui.configbool('ui', 'quietbookmarkmove', False):
446 445 status = warn = ui.debug
447 446
448 447 explicit = set(explicit)
449 448 changed = []
450 449 for b, scid, dcid in addsrc:
451 450 if scid in repo: # add remote bookmarks for changes we already have
452 451 changed.append((b, scid, status,
453 452 _("adding remote bookmark %s\n") % (b)))
454 453 elif b in explicit:
455 454 explicit.remove(b)
456 455 ui.warn(_("remote bookmark %s points to locally missing %s\n")
457 456 % (b, hex(scid)[:12]))
458 457
459 458 for b, scid, dcid in advsrc:
460 459 changed.append((b, scid, status,
461 460 _("updating bookmark %s\n") % (b)))
462 461 # remove normal movement from explicit set
463 462 explicit.difference_update(d[0] for d in changed)
464 463
465 464 for b, scid, dcid in diverge:
466 465 if b in explicit:
467 466 explicit.discard(b)
468 467 changed.append((b, scid, status,
469 468 _("importing bookmark %s\n") % (b)))
470 469 else:
471 470 db = _diverge(ui, b, path, localmarks, scid)
472 471 if db:
473 472 changed.append((db, scid, warn,
474 473 _("divergent bookmark %s stored as %s\n") %
475 474 (b, db)))
476 475 else:
477 476 warn(_("warning: failed to assign numbered name "
478 477 "to divergent bookmark %s\n") % (b))
479 478 for b, scid, dcid in adddst + advdst:
480 479 if b in explicit:
481 480 explicit.discard(b)
482 481 changed.append((b, scid, status,
483 482 _("importing bookmark %s\n") % (b)))
484 483 for b, scid, dcid in differ:
485 484 if b in explicit:
486 485 explicit.remove(b)
487 486 ui.warn(_("remote bookmark %s points to locally missing %s\n")
488 487 % (b, hex(scid)[:12]))
489 488
490 489 if changed:
491 490 tr = trfunc()
492 491 for b, node, writer, msg in sorted(changed):
493 492 localmarks[b] = node
494 493 writer(msg)
495 494 localmarks.recordchange(tr)
496 495
497 496 def incoming(ui, repo, other):
498 497 '''Show bookmarks incoming from other to repo
499 498 '''
500 499 ui.status(_("searching for changed bookmarks\n"))
501 500
502 501 remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
503 502 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
504 503 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
505 504
506 505 incomings = []
507 506 if ui.debugflag:
508 507 getid = lambda id: id
509 508 else:
510 509 getid = lambda id: id[:12]
511 510 if ui.verbose:
512 511 def add(b, id, st):
513 512 incomings.append(" %-25s %s %s\n" % (b, getid(id), st))
514 513 else:
515 514 def add(b, id, st):
516 515 incomings.append(" %-25s %s\n" % (b, getid(id)))
517 516 for b, scid, dcid in addsrc:
518 517 # i18n: "added" refers to a bookmark
519 518 add(b, hex(scid), _('added'))
520 519 for b, scid, dcid in advsrc:
521 520 # i18n: "advanced" refers to a bookmark
522 521 add(b, hex(scid), _('advanced'))
523 522 for b, scid, dcid in diverge:
524 523 # i18n: "diverged" refers to a bookmark
525 524 add(b, hex(scid), _('diverged'))
526 525 for b, scid, dcid in differ:
527 526 # i18n: "changed" refers to a bookmark
528 527 add(b, hex(scid), _('changed'))
529 528
530 529 if not incomings:
531 530 ui.status(_("no changed bookmarks found\n"))
532 531 return 1
533 532
534 533 for s in sorted(incomings):
535 534 ui.write(s)
536 535
537 536 return 0
538 537
539 538 def outgoing(ui, repo, other):
540 539 '''Show bookmarks outgoing from repo to other
541 540 '''
542 541 ui.status(_("searching for changed bookmarks\n"))
543 542
544 543 remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
545 544 r = comparebookmarks(repo, repo._bookmarks, remotemarks)
546 545 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
547 546
548 547 outgoings = []
549 548 if ui.debugflag:
550 549 getid = lambda id: id
551 550 else:
552 551 getid = lambda id: id[:12]
553 552 if ui.verbose:
554 553 def add(b, id, st):
555 554 outgoings.append(" %-25s %s %s\n" % (b, getid(id), st))
556 555 else:
557 556 def add(b, id, st):
558 557 outgoings.append(" %-25s %s\n" % (b, getid(id)))
559 558 for b, scid, dcid in addsrc:
560 559 # i18n: "added" refers to a bookmark
561 560 add(b, hex(scid), _('added'))
562 561 for b, scid, dcid in adddst:
563 562 # i18n: "deleted" refers to a bookmark
564 563 add(b, ' ' * 40, _('deleted'))
565 564 for b, scid, dcid in advsrc:
566 565 # i18n: "advanced" refers to a bookmark
567 566 add(b, hex(scid), _('advanced'))
568 567 for b, scid, dcid in diverge:
569 568 # i18n: "diverged" refers to a bookmark
570 569 add(b, hex(scid), _('diverged'))
571 570 for b, scid, dcid in differ:
572 571 # i18n: "changed" refers to a bookmark
573 572 add(b, hex(scid), _('changed'))
574 573
575 574 if not outgoings:
576 575 ui.status(_("no changed bookmarks found\n"))
577 576 return 1
578 577
579 578 for s in sorted(outgoings):
580 579 ui.write(s)
581 580
582 581 return 0
583 582
584 583 def summary(repo, other):
585 584 '''Compare bookmarks between repo and other for "hg summary" output
586 585
587 586 This returns "(# of incoming, # of outgoing)" tuple.
588 587 '''
589 588 remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
590 589 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
591 590 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
592 591 return (len(addsrc), len(adddst))
593 592
594 593 def validdest(repo, old, new):
595 594 """Is the new bookmark destination a valid update from the old one"""
596 595 repo = repo.unfiltered()
597 596 if old == new:
598 597 # Old == new -> nothing to update.
599 598 return False
600 599 elif not old:
601 600 # old is nullrev, anything is valid.
602 601 # (new != nullrev has been excluded by the previous check)
603 602 return True
604 603 elif repo.obsstore:
605 604 return new.node() in obsolete.foreground(repo, [old.node()])
606 605 else:
607 606 # still an independent clause as it is lazier (and therefore faster)
608 607 return old.descendant(new)
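The bmstore docstring above pins down the on-disk bookmark format: one "<40-hex-node> <name>" pair per line in .hg/bookmarks. As a rough illustration, here is a hypothetical standalone helper mirroring the tolerant parsing loop in bmstore.__init__ (a sketch, not Mercurial API):

    def readbookmarks(path):
        # Parse a .hg/bookmarks file into {name: hex node}. Like
        # bmstore.__init__, skip blank lines and lines without the
        # separating space (bmstore warns about the latter instead).
        marks = {}
        with open(path, 'rb') as fp:
            for line in fp:
                line = line.strip()
                if not line or b' ' not in line:
                    continue
                sha, refspec = line.split(b' ', 1)
                marks[refspec] = sha
        return marks

Unlike this sketch, bmstore also converts each name with encoding.tolocal() and resolves each hash through repo.changelog.lookup(), silently dropping bookmarks whose node is unknown.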
@@ -1,1260 +1,1260
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import nullid
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 match as matchmod,
21 21 osutil,
22 22 parsers,
23 23 pathutil,
24 24 pycompat,
25 25 scmutil,
26 26 util,
27 27 )
28 28
29 29 propertycache = util.propertycache
30 30 filecache = scmutil.filecache
31 31 _rangemask = 0x7fffffff
32 32
33 33 dirstatetuple = parsers.dirstatetuple
34 34
35 35 class repocache(filecache):
36 36 """filecache for files in .hg/"""
37 37 def join(self, obj, fname):
38 38 return obj._opener.join(fname)
39 39
40 40 class rootcache(filecache):
41 41 """filecache for files in the repository root"""
42 42 def join(self, obj, fname):
43 43 return obj._join(fname)
44 44
45 45 def _getfsnow(vfs):
46 46 '''Get "now" timestamp on filesystem'''
47 47 tmpfd, tmpname = vfs.mkstemp()
48 48 try:
49 49 return os.fstat(tmpfd).st_mtime
50 50 finally:
51 51 os.close(tmpfd)
52 52 vfs.unlink(tmpname)
53 53
54 54 def nonnormalentries(dmap):
55 55 '''Compute the nonnormal dirstate entries from the dmap'''
56 56 try:
57 57 return parsers.nonnormalentries(dmap)
58 58 except AttributeError:
59 59 return set(fname for fname, e in dmap.iteritems()
60 60 if e[0] != 'n' or e[3] == -1)
61 61
62 62 def _trypending(root, vfs, filename):
63 63 '''Open file to be read according to HG_PENDING environment variable
64 64
65 65 This opens the '.pending' variant of the specified 'filename' only
66 66 when HG_PENDING is equal to 'root'.
67 67
68 68 This returns '(fp, is_pending_opened)' tuple.
69 69 '''
70 if root == os.environ.get('HG_PENDING'):
70 if root == encoding.environ.get('HG_PENDING'):
71 71 try:
72 72 return (vfs('%s.pending' % filename), True)
73 73 except IOError as inst:
74 74 if inst.errno != errno.ENOENT:
75 75 raise
76 76 return (vfs(filename), False)
77 77
78 78 class dirstate(object):
79 79
80 80 def __init__(self, opener, ui, root, validate):
81 81 '''Create a new dirstate object.
82 82
83 83 opener is an open()-like callable that can be used to open the
84 84 dirstate file; root is the root of the directory tracked by
85 85 the dirstate.
86 86 '''
87 87 self._opener = opener
88 88 self._validate = validate
89 89 self._root = root
90 90 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
91 91 # a UNC path pointing to a root share (issue4557)
92 92 self._rootdir = pathutil.normasprefix(root)
93 93 # internal config: ui.forcecwd
94 94 forcecwd = ui.config('ui', 'forcecwd')
95 95 if forcecwd:
96 96 self._cwd = forcecwd
97 97 self._dirty = False
98 98 self._dirtypl = False
99 99 self._lastnormaltime = 0
100 100 self._ui = ui
101 101 self._filecache = {}
102 102 self._parentwriters = 0
103 103 self._filename = 'dirstate'
104 104 self._pendingfilename = '%s.pending' % self._filename
105 105 self._plchangecallbacks = {}
106 106 self._origpl = None
107 107
108 108 # for consistent view between _pl() and _read() invocations
109 109 self._pendingmode = None
110 110
111 111 def beginparentchange(self):
112 112 '''Marks the beginning of a set of changes that involve changing
113 113 the dirstate parents. If there is an exception during this time,
114 114 the dirstate will not be written when the wlock is released. This
115 115 prevents writing an incoherent dirstate where the parent doesn't
116 116 match the contents.
117 117 '''
118 118 self._parentwriters += 1
119 119
120 120 def endparentchange(self):
121 121 '''Marks the end of a set of changes that involve changing the
122 122 dirstate parents. Once all parent changes have been marked done,
123 123 the wlock will be free to write the dirstate on release.
124 124 '''
125 125 if self._parentwriters > 0:
126 126 self._parentwriters -= 1
127 127
128 128 def pendingparentchange(self):
129 129 '''Returns true if the dirstate is in the middle of a set of changes
130 130 that modify the dirstate parent.
131 131 '''
132 132 return self._parentwriters > 0
133 133
134 134 @propertycache
135 135 def _map(self):
136 136 '''Return the dirstate contents as a map from filename to
137 137 (state, mode, size, time).'''
138 138 self._read()
139 139 return self._map
140 140
141 141 @propertycache
142 142 def _copymap(self):
143 143 self._read()
144 144 return self._copymap
145 145
146 146 @propertycache
147 147 def _nonnormalset(self):
148 148 return nonnormalentries(self._map)
149 149
150 150 @propertycache
151 151 def _filefoldmap(self):
152 152 try:
153 153 makefilefoldmap = parsers.make_file_foldmap
154 154 except AttributeError:
155 155 pass
156 156 else:
157 157 return makefilefoldmap(self._map, util.normcasespec,
158 158 util.normcasefallback)
159 159
160 160 f = {}
161 161 normcase = util.normcase
162 162 for name, s in self._map.iteritems():
163 163 if s[0] != 'r':
164 164 f[normcase(name)] = name
165 165 f['.'] = '.' # prevents useless util.fspath() invocation
166 166 return f
167 167
168 168 @propertycache
169 169 def _dirfoldmap(self):
170 170 f = {}
171 171 normcase = util.normcase
172 172 for name in self._dirs:
173 173 f[normcase(name)] = name
174 174 return f
175 175
176 176 @repocache('branch')
177 177 def _branch(self):
178 178 try:
179 179 return self._opener.read("branch").strip() or "default"
180 180 except IOError as inst:
181 181 if inst.errno != errno.ENOENT:
182 182 raise
183 183 return "default"
184 184
185 185 @propertycache
186 186 def _pl(self):
187 187 try:
188 188 fp = self._opendirstatefile()
189 189 st = fp.read(40)
190 190 fp.close()
191 191 l = len(st)
192 192 if l == 40:
193 193 return st[:20], st[20:40]
194 194 elif l > 0 and l < 40:
195 195 raise error.Abort(_('working directory state appears damaged!'))
196 196 except IOError as err:
197 197 if err.errno != errno.ENOENT:
198 198 raise
199 199 return [nullid, nullid]
200 200
201 201 @propertycache
202 202 def _dirs(self):
203 203 return util.dirs(self._map, 'r')
204 204
205 205 def dirs(self):
206 206 return self._dirs
207 207
208 208 @rootcache('.hgignore')
209 209 def _ignore(self):
210 210 files = self._ignorefiles()
211 211 if not files:
212 212 return util.never
213 213
214 214 pats = ['include:%s' % f for f in files]
215 215 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
216 216
217 217 @propertycache
218 218 def _slash(self):
219 219 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
220 220
221 221 @propertycache
222 222 def _checklink(self):
223 223 return util.checklink(self._root)
224 224
225 225 @propertycache
226 226 def _checkexec(self):
227 227 return util.checkexec(self._root)
228 228
229 229 @propertycache
230 230 def _checkcase(self):
231 231 return not util.fscasesensitive(self._join('.hg'))
232 232
233 233 def _join(self, f):
234 234 # much faster than os.path.join()
235 235 # it's safe because f is always a relative path
236 236 return self._rootdir + f
237 237
238 238 def flagfunc(self, buildfallback):
239 239 if self._checklink and self._checkexec:
240 240 def f(x):
241 241 try:
242 242 st = os.lstat(self._join(x))
243 243 if util.statislink(st):
244 244 return 'l'
245 245 if util.statisexec(st):
246 246 return 'x'
247 247 except OSError:
248 248 pass
249 249 return ''
250 250 return f
251 251
252 252 fallback = buildfallback()
253 253 if self._checklink:
254 254 def f(x):
255 255 if os.path.islink(self._join(x)):
256 256 return 'l'
257 257 if 'x' in fallback(x):
258 258 return 'x'
259 259 return ''
260 260 return f
261 261 if self._checkexec:
262 262 def f(x):
263 263 if 'l' in fallback(x):
264 264 return 'l'
265 265 if util.isexec(self._join(x)):
266 266 return 'x'
267 267 return ''
268 268 return f
269 269 else:
270 270 return fallback
271 271
272 272 @propertycache
273 273 def _cwd(self):
274 274 return pycompat.getcwd()
275 275
276 276 def getcwd(self):
277 277 '''Return the path from which a canonical path is calculated.
278 278
279 279 This path should be used to resolve file patterns or to convert
280 280 canonical paths back to file paths for display. It shouldn't be
281 281 used to get real file paths. Use vfs functions instead.
282 282 '''
283 283 cwd = self._cwd
284 284 if cwd == self._root:
285 285 return ''
286 286 # self._root ends with a path separator if self._root is '/' or 'C:\'
287 287 rootsep = self._root
288 288 if not util.endswithsep(rootsep):
289 289 rootsep += pycompat.ossep
290 290 if cwd.startswith(rootsep):
291 291 return cwd[len(rootsep):]
292 292 else:
293 293 # we're outside the repo. return an absolute path.
294 294 return cwd
295 295
296 296 def pathto(self, f, cwd=None):
297 297 if cwd is None:
298 298 cwd = self.getcwd()
299 299 path = util.pathto(self._root, cwd, f)
300 300 if self._slash:
301 301 return util.pconvert(path)
302 302 return path
303 303
304 304 def __getitem__(self, key):
305 305 '''Return the current state of key (a filename) in the dirstate.
306 306
307 307 States are:
308 308 n normal
309 309 m needs merging
310 310 r marked for removal
311 311 a marked for addition
312 312 ? not tracked
313 313 '''
314 314 return self._map.get(key, ("?",))[0]
315 315
316 316 def __contains__(self, key):
317 317 return key in self._map
318 318
319 319 def __iter__(self):
320 320 for x in sorted(self._map):
321 321 yield x
322 322
323 323 def iteritems(self):
324 324 return self._map.iteritems()
325 325
326 326 def parents(self):
327 327 return [self._validate(p) for p in self._pl]
328 328
329 329 def p1(self):
330 330 return self._validate(self._pl[0])
331 331
332 332 def p2(self):
333 333 return self._validate(self._pl[1])
334 334
335 335 def branch(self):
336 336 return encoding.tolocal(self._branch)
337 337
338 338 def setparents(self, p1, p2=nullid):
339 339 """Set dirstate parents to p1 and p2.
340 340
341 341 When moving from two parents to one, 'm' merged entries are
342 342 adjusted to normal and previous copy records discarded and
343 343 returned by the call.
344 344
345 345 See localrepo.setparents()
346 346 """
347 347 if self._parentwriters == 0:
348 348 raise ValueError("cannot set dirstate parent without "
349 349 "calling dirstate.beginparentchange")
350 350
351 351 self._dirty = self._dirtypl = True
352 352 oldp2 = self._pl[1]
353 353 if self._origpl is None:
354 354 self._origpl = self._pl
355 355 self._pl = p1, p2
356 356 copies = {}
357 357 if oldp2 != nullid and p2 == nullid:
358 358 for f, s in self._map.iteritems():
359 359 # Discard 'm' markers when moving away from a merge state
360 360 if s[0] == 'm':
361 361 if f in self._copymap:
362 362 copies[f] = self._copymap[f]
363 363 self.normallookup(f)
364 364 # Also fix up otherparent markers
365 365 elif s[0] == 'n' and s[2] == -2:
366 366 if f in self._copymap:
367 367 copies[f] = self._copymap[f]
368 368 self.add(f)
369 369 return copies
370 370
371 371 def setbranch(self, branch):
372 372 self._branch = encoding.fromlocal(branch)
373 373 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
374 374 try:
375 375 f.write(self._branch + '\n')
376 376 f.close()
377 377
378 378 # make sure filecache has the correct stat info for _branch after
379 379 # replacing the underlying file
380 380 ce = self._filecache['_branch']
381 381 if ce:
382 382 ce.refresh()
383 383 except: # re-raises
384 384 f.discard()
385 385 raise
386 386
387 387 def _opendirstatefile(self):
388 388 fp, mode = _trypending(self._root, self._opener, self._filename)
389 389 if self._pendingmode is not None and self._pendingmode != mode:
390 390 fp.close()
391 391 raise error.Abort(_('working directory state may be '
392 392 'changed parallelly'))
393 393 self._pendingmode = mode
394 394 return fp
395 395
396 396 def _read(self):
397 397 self._map = {}
398 398 self._copymap = {}
399 399 try:
400 400 fp = self._opendirstatefile()
401 401 try:
402 402 st = fp.read()
403 403 finally:
404 404 fp.close()
405 405 except IOError as err:
406 406 if err.errno != errno.ENOENT:
407 407 raise
408 408 return
409 409 if not st:
410 410 return
411 411
412 412 if util.safehasattr(parsers, 'dict_new_presized'):
413 413 # Make an estimate of the number of files in the dirstate based on
414 414 # its size. From a linear regression on a set of real-world repos,
415 415 # all over 10,000 files, the size of a dirstate entry is 85
416 416 # bytes. The cost of resizing is significantly higher than the cost
417 417 # of filling in a larger presized dict, so subtract 20% from the
418 418 # size.
419 419 #
420 420 # This heuristic is imperfect in many ways, so in a future dirstate
421 421 # format update it makes sense to just record the number of entries
422 422 # on write.
423 423 self._map = parsers.dict_new_presized(len(st) / 71)
424 424
425 425 # Python's garbage collector triggers a GC each time a certain number
426 426 # of container objects (the number being defined by
427 427 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
428 428 # for each file in the dirstate. The C version then immediately marks
429 429 # them as not to be tracked by the collector. However, this has no
430 430 # effect on when GCs are triggered, only on what objects the GC looks
431 431 # into. This means that O(number of files) GCs are unavoidable.
432 432 # Depending on when in the process's lifetime the dirstate is parsed,
433 433 # this can get very expensive. As a workaround, disable GC while
434 434 # parsing the dirstate.
435 435 #
436 436 # (we cannot decorate the function directly since it is in a C module)
437 437 parse_dirstate = util.nogc(parsers.parse_dirstate)
438 438 p = parse_dirstate(self._map, self._copymap, st)
439 439 if not self._dirtypl:
440 440 self._pl = p
441 441
442 442 def invalidate(self):
443 443 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
444 444 "_pl", "_dirs", "_ignore", "_nonnormalset"):
445 445 if a in self.__dict__:
446 446 delattr(self, a)
447 447 self._lastnormaltime = 0
448 448 self._dirty = False
449 449 self._parentwriters = 0
450 450 self._origpl = None
451 451
452 452 def copy(self, source, dest):
453 453 """Mark dest as a copy of source. Unmark dest if source is None."""
454 454 if source == dest:
455 455 return
456 456 self._dirty = True
457 457 if source is not None:
458 458 self._copymap[dest] = source
459 459 elif dest in self._copymap:
460 460 del self._copymap[dest]
461 461
462 462 def copied(self, file):
463 463 return self._copymap.get(file, None)
464 464
465 465 def copies(self):
466 466 return self._copymap
467 467
468 468 def _droppath(self, f):
469 469 if self[f] not in "?r" and "_dirs" in self.__dict__:
470 470 self._dirs.delpath(f)
471 471
472 472 if "_filefoldmap" in self.__dict__:
473 473 normed = util.normcase(f)
474 474 if normed in self._filefoldmap:
475 475 del self._filefoldmap[normed]
476 476
477 477 def _addpath(self, f, state, mode, size, mtime):
478 478 oldstate = self[f]
479 479 if state == 'a' or oldstate == 'r':
480 480 scmutil.checkfilename(f)
481 481 if f in self._dirs:
482 482 raise error.Abort(_('directory %r already in dirstate') % f)
483 483 # shadows
484 484 for d in util.finddirs(f):
485 485 if d in self._dirs:
486 486 break
487 487 if d in self._map and self[d] != 'r':
488 488 raise error.Abort(
489 489 _('file %r in dirstate clashes with %r') % (d, f))
490 490 if oldstate in "?r" and "_dirs" in self.__dict__:
491 491 self._dirs.addpath(f)
492 492 self._dirty = True
493 493 self._map[f] = dirstatetuple(state, mode, size, mtime)
494 494 if state != 'n' or mtime == -1:
495 495 self._nonnormalset.add(f)
496 496
497 497 def normal(self, f):
498 498 '''Mark a file normal and clean.'''
499 499 s = os.lstat(self._join(f))
500 500 mtime = s.st_mtime
501 501 self._addpath(f, 'n', s.st_mode,
502 502 s.st_size & _rangemask, mtime & _rangemask)
503 503 if f in self._copymap:
504 504 del self._copymap[f]
505 505 if f in self._nonnormalset:
506 506 self._nonnormalset.remove(f)
507 507 if mtime > self._lastnormaltime:
508 508 # Remember the most recent modification timeslot for status(),
509 509 # to make sure we won't miss future size-preserving file content
510 510 # modifications that happen within the same timeslot.
511 511 self._lastnormaltime = mtime
512 512
513 513 def normallookup(self, f):
514 514 '''Mark a file normal, but possibly dirty.'''
515 515 if self._pl[1] != nullid and f in self._map:
516 516 # if there is a merge going on and the file was either
517 517 # in state 'm' (-1) or coming from other parent (-2) before
518 518 # being removed, restore that state.
519 519 entry = self._map[f]
520 520 if entry[0] == 'r' and entry[2] in (-1, -2):
521 521 source = self._copymap.get(f)
522 522 if entry[2] == -1:
523 523 self.merge(f)
524 524 elif entry[2] == -2:
525 525 self.otherparent(f)
526 526 if source:
527 527 self.copy(source, f)
528 528 return
529 529 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
530 530 return
531 531 self._addpath(f, 'n', 0, -1, -1)
532 532 if f in self._copymap:
533 533 del self._copymap[f]
534 534 if f in self._nonnormalset:
535 535 self._nonnormalset.remove(f)
536 536
537 537 def otherparent(self, f):
538 538 '''Mark as coming from the other parent, always dirty.'''
539 539 if self._pl[1] == nullid:
540 540 raise error.Abort(_("setting %r to other parent "
541 541 "only allowed in merges") % f)
542 542 if f in self and self[f] == 'n':
543 543 # merge-like
544 544 self._addpath(f, 'm', 0, -2, -1)
545 545 else:
546 546 # add-like
547 547 self._addpath(f, 'n', 0, -2, -1)
548 548
549 549 if f in self._copymap:
550 550 del self._copymap[f]
551 551
552 552 def add(self, f):
553 553 '''Mark a file added.'''
554 554 self._addpath(f, 'a', 0, -1, -1)
555 555 if f in self._copymap:
556 556 del self._copymap[f]
557 557
558 558 def remove(self, f):
559 559 '''Mark a file removed.'''
560 560 self._dirty = True
561 561 self._droppath(f)
562 562 size = 0
563 563 if self._pl[1] != nullid and f in self._map:
564 564 # backup the previous state
565 565 entry = self._map[f]
566 566 if entry[0] == 'm': # merge
567 567 size = -1
568 568 elif entry[0] == 'n' and entry[2] == -2: # other parent
569 569 size = -2
570 570 self._map[f] = dirstatetuple('r', 0, size, 0)
571 571 self._nonnormalset.add(f)
572 572 if size == 0 and f in self._copymap:
573 573 del self._copymap[f]
574 574
575 575 def merge(self, f):
576 576 '''Mark a file merged.'''
577 577 if self._pl[1] == nullid:
578 578 return self.normallookup(f)
579 579 return self.otherparent(f)
580 580
581 581 def drop(self, f):
582 582 '''Drop a file from the dirstate'''
583 583 if f in self._map:
584 584 self._dirty = True
585 585 self._droppath(f)
586 586 del self._map[f]
587 587 if f in self._nonnormalset:
588 588 self._nonnormalset.remove(f)
589 589 if f in self._copymap:
590 590 del self._copymap[f]
591 591
592 592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
593 593 if exists is None:
594 594 exists = os.path.lexists(os.path.join(self._root, path))
595 595 if not exists:
596 596 # Maybe a path component exists
597 597 if not ignoremissing and '/' in path:
598 598 d, f = path.rsplit('/', 1)
599 599 d = self._normalize(d, False, ignoremissing, None)
600 600 folded = d + "/" + f
601 601 else:
602 602 # No path components, preserve original case
603 603 folded = path
604 604 else:
605 605 # recursively normalize leading directory components
606 606 # against dirstate
607 607 if '/' in normed:
608 608 d, f = normed.rsplit('/', 1)
609 609 d = self._normalize(d, False, ignoremissing, True)
610 610 r = self._root + "/" + d
611 611 folded = d + "/" + util.fspath(f, r)
612 612 else:
613 613 folded = util.fspath(normed, self._root)
614 614 storemap[normed] = folded
615 615
616 616 return folded
617 617
618 618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
619 619 normed = util.normcase(path)
620 620 folded = self._filefoldmap.get(normed, None)
621 621 if folded is None:
622 622 if isknown:
623 623 folded = path
624 624 else:
625 625 folded = self._discoverpath(path, normed, ignoremissing, exists,
626 626 self._filefoldmap)
627 627 return folded
628 628
629 629 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
630 630 normed = util.normcase(path)
631 631 folded = self._filefoldmap.get(normed, None)
632 632 if folded is None:
633 633 folded = self._dirfoldmap.get(normed, None)
634 634 if folded is None:
635 635 if isknown:
636 636 folded = path
637 637 else:
638 638 # store discovered result in dirfoldmap so that future
639 639 # normalizefile calls don't start matching directories
640 640 folded = self._discoverpath(path, normed, ignoremissing, exists,
641 641 self._dirfoldmap)
642 642 return folded
643 643
644 644 def normalize(self, path, isknown=False, ignoremissing=False):
645 645 '''
646 646 normalize the case of a pathname when on a casefolding filesystem
647 647
648 648 isknown specifies whether the filename came from walking the
649 649 disk, to avoid extra filesystem access.
650 650
651 651 If ignoremissing is True, missing paths are returned
652 652 unchanged. Otherwise, we try harder to normalize possibly
653 653 existing path components.
654 654
655 655 The normalized case is determined based on the following precedence:
656 656
657 657 - version of name already stored in the dirstate
658 658 - version of name stored on disk
659 659 - version provided via command arguments
660 660 '''
661 661
662 662 if self._checkcase:
663 663 return self._normalize(path, isknown, ignoremissing)
664 664 return path
665 665
666 666 def clear(self):
667 667 self._map = {}
668 668 self._nonnormalset = set()
669 669 if "_dirs" in self.__dict__:
670 670 delattr(self, "_dirs")
671 671 self._copymap = {}
672 672 self._pl = [nullid, nullid]
673 673 self._lastnormaltime = 0
674 674 self._dirty = True
675 675
676 676 def rebuild(self, parent, allfiles, changedfiles=None):
677 677 if changedfiles is None:
678 678 # Rebuild entire dirstate
679 679 changedfiles = allfiles
680 680 lastnormaltime = self._lastnormaltime
681 681 self.clear()
682 682 self._lastnormaltime = lastnormaltime
683 683
684 684 if self._origpl is None:
685 685 self._origpl = self._pl
686 686 self._pl = (parent, nullid)
687 687 for f in changedfiles:
688 688 if f in allfiles:
689 689 self.normallookup(f)
690 690 else:
691 691 self.drop(f)
692 692
693 693 self._dirty = True
694 694
695 695 def write(self, tr):
696 696 if not self._dirty:
697 697 return
698 698
699 699 filename = self._filename
700 700 if tr:
701 701 # 'dirstate.write()' is not only for writing in-memory
702 702 # changes out, but also for dropping ambiguous timestamp.
703 703 # delayed writing re-raise "ambiguous timestamp issue".
704 704 # See also the wiki page below for detail:
705 705 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
706 706
707 707 # emulate dropping timestamp in 'parsers.pack_dirstate'
708 708 now = _getfsnow(self._opener)
709 709 dmap = self._map
710 710 for f, e in dmap.iteritems():
711 711 if e[0] == 'n' and e[3] == now:
712 712 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
713 713 self._nonnormalset.add(f)
714 714
715 715 # emulate that all 'dirstate.normal' results are written out
716 716 self._lastnormaltime = 0
717 717
718 718 # delay writing in-memory changes out
719 719 tr.addfilegenerator('dirstate', (self._filename,),
720 720 self._writedirstate, location='plain')
721 721 return
722 722
723 723 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
724 724 self._writedirstate(st)
725 725
726 726 def addparentchangecallback(self, category, callback):
727 727 """add a callback to be called when the wd parents are changed
728 728
729 729 Callback will be called with the following arguments:
730 730 dirstate, (oldp1, oldp2), (newp1, newp2)
731 731
732 732 Category is a unique identifier to allow overwriting an old callback
733 733 with a newer callback.
734 734 """
735 735 self._plchangecallbacks[category] = callback
736 736
737 737 def _writedirstate(self, st):
738 738 # notify callbacks about parents change
739 739 if self._origpl is not None and self._origpl != self._pl:
740 740 for c, callback in sorted(self._plchangecallbacks.iteritems()):
741 741 callback(self, self._origpl, self._pl)
742 742 self._origpl = None
743 743 # use the modification time of the newly created temporary file as the
744 744 # filesystem's notion of 'now'
745 745 now = util.fstat(st).st_mtime & _rangemask
746 746
747 747 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
748 748 # the timestamp of each entry in the dirstate, because of 'now > mtime'
749 749 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
750 750 if delaywrite > 0:
751 751 # do we have any files to delay for?
752 752 for f, e in self._map.iteritems():
753 753 if e[0] == 'n' and e[3] == now:
754 754 import time # to avoid useless import
755 755 # rather than sleep n seconds, sleep until the next
756 756 # multiple of n seconds
757 757 clock = time.time()
758 758 start = int(clock) - (int(clock) % delaywrite)
759 759 end = start + delaywrite
760 760 time.sleep(end - clock)
761 761 now = end # trust our estimate that the end is near now
762 762 break
763 763
764 764 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
765 765 self._nonnormalset = nonnormalentries(self._map)
766 766 st.close()
767 767 self._lastnormaltime = 0
768 768 self._dirty = self._dirtypl = False
769 769
770 770 def _dirignore(self, f):
771 771 if f == '.':
772 772 return False
773 773 if self._ignore(f):
774 774 return True
775 775 for p in util.finddirs(f):
776 776 if self._ignore(p):
777 777 return True
778 778 return False
779 779
780 780 def _ignorefiles(self):
781 781 files = []
782 782 if os.path.exists(self._join('.hgignore')):
783 783 files.append(self._join('.hgignore'))
784 784 for name, path in self._ui.configitems("ui"):
785 785 if name == 'ignore' or name.startswith('ignore.'):
786 786 # we need to use os.path.join here rather than self._join
787 787 # because path is arbitrary and user-specified
788 788 files.append(os.path.join(self._rootdir, util.expandpath(path)))
789 789 return files
790 790
791 791 def _ignorefileandline(self, f):
792 792 files = collections.deque(self._ignorefiles())
793 793 visited = set()
794 794 while files:
795 795 i = files.popleft()
796 796 patterns = matchmod.readpatternfile(i, self._ui.warn,
797 797 sourceinfo=True)
798 798 for pattern, lineno, line in patterns:
799 799 kind, p = matchmod._patsplit(pattern, 'glob')
800 800 if kind == "subinclude":
801 801 if p not in visited:
802 802 files.append(p)
803 803 continue
804 804 m = matchmod.match(self._root, '', [], [pattern],
805 805 warn=self._ui.warn)
806 806 if m(f):
807 807 return (i, lineno, line)
808 808 visited.add(i)
809 809 return (None, -1, "")
810 810
811 811 def _walkexplicit(self, match, subrepos):
812 812 '''Get stat data about the files explicitly specified by match.
813 813
814 814 Return a triple (results, dirsfound, dirsnotfound).
815 815 - results is a mapping from filename to stat result. It also contains
816 816 listings mapping subrepos and .hg to None.
817 817 - dirsfound is a list of files found to be directories.
818 818 - dirsnotfound is a list of files that the dirstate thinks are
819 819 directories and that were not found.'''
820 820
821 821 def badtype(mode):
822 822 kind = _('unknown')
823 823 if stat.S_ISCHR(mode):
824 824 kind = _('character device')
825 825 elif stat.S_ISBLK(mode):
826 826 kind = _('block device')
827 827 elif stat.S_ISFIFO(mode):
828 828 kind = _('fifo')
829 829 elif stat.S_ISSOCK(mode):
830 830 kind = _('socket')
831 831 elif stat.S_ISDIR(mode):
832 832 kind = _('directory')
833 833 return _('unsupported file type (type is %s)') % kind
834 834
835 835 matchedir = match.explicitdir
836 836 badfn = match.bad
837 837 dmap = self._map
838 838 lstat = os.lstat
839 839 getkind = stat.S_IFMT
840 840 dirkind = stat.S_IFDIR
841 841 regkind = stat.S_IFREG
842 842 lnkkind = stat.S_IFLNK
843 843 join = self._join
844 844 dirsfound = []
845 845 foundadd = dirsfound.append
846 846 dirsnotfound = []
847 847 notfoundadd = dirsnotfound.append
848 848
849 849 if not match.isexact() and self._checkcase:
850 850 normalize = self._normalize
851 851 else:
852 852 normalize = None
853 853
854 854 files = sorted(match.files())
855 855 subrepos.sort()
856 856 i, j = 0, 0
857 857 while i < len(files) and j < len(subrepos):
858 858 subpath = subrepos[j] + "/"
859 859 if files[i] < subpath:
860 860 i += 1
861 861 continue
862 862 while i < len(files) and files[i].startswith(subpath):
863 863 del files[i]
864 864 j += 1
865 865
866 866 if not files or '.' in files:
867 867 files = ['.']
868 868 results = dict.fromkeys(subrepos)
869 869 results['.hg'] = None
870 870
871 871 alldirs = None
872 872 for ff in files:
873 873 # constructing the foldmap is expensive, so don't do it for the
874 874 # common case where files is ['.']
875 875 if normalize and ff != '.':
876 876 nf = normalize(ff, False, True)
877 877 else:
878 878 nf = ff
879 879 if nf in results:
880 880 continue
881 881
882 882 try:
883 883 st = lstat(join(nf))
884 884 kind = getkind(st.st_mode)
885 885 if kind == dirkind:
886 886 if nf in dmap:
887 887 # file replaced by dir on disk but still in dirstate
888 888 results[nf] = None
889 889 if matchedir:
890 890 matchedir(nf)
891 891 foundadd((nf, ff))
892 892 elif kind == regkind or kind == lnkkind:
893 893 results[nf] = st
894 894 else:
895 895 badfn(ff, badtype(kind))
896 896 if nf in dmap:
897 897 results[nf] = None
898 898 except OSError as inst: # nf not found on disk - it is dirstate only
899 899 if nf in dmap: # does it exactly match a missing file?
900 900 results[nf] = None
901 901 else: # does it match a missing directory?
902 902 if alldirs is None:
903 903 alldirs = util.dirs(dmap)
904 904 if nf in alldirs:
905 905 if matchedir:
906 906 matchedir(nf)
907 907 notfoundadd(nf)
908 908 else:
909 909 badfn(ff, inst.strerror)
910 910
911 911 # Case insensitive filesystems cannot rely on lstat() failing to detect
912 912 # a case-only rename. Prune the stat object for any file that does not
913 913 # match the case in the filesystem, if there are multiple files that
914 914 # normalize to the same path.
915 915 if match.isexact() and self._checkcase:
916 916 normed = {}
917 917
918 918 for f, st in results.iteritems():
919 919 if st is None:
920 920 continue
921 921
922 922 nc = util.normcase(f)
923 923 paths = normed.get(nc)
924 924
925 925 if paths is None:
926 926 paths = set()
927 927 normed[nc] = paths
928 928
929 929 paths.add(f)
930 930
931 931 for norm, paths in normed.iteritems():
932 932 if len(paths) > 1:
933 933 for path in paths:
934 934 folded = self._discoverpath(path, norm, True, None,
935 935 self._dirfoldmap)
936 936 if path != folded:
937 937 results[path] = None
938 938
939 939 return results, dirsfound, dirsnotfound
940 940
941 941 def walk(self, match, subrepos, unknown, ignored, full=True):
942 942 '''
943 943 Walk recursively through the directory tree, finding all files
944 944 matched by match.
945 945
946 946 If full is False, maybe skip some known-clean files.
947 947
948 948 Return a dict mapping filename to stat-like object (either
949 949 mercurial.osutil.stat instance or return value of os.stat()).
950 950
951 951 '''
952 952 # full is a flag that extensions that hook into walk can use -- this
953 953 # implementation doesn't use it at all. This satisfies the contract
954 954 # because we only guarantee a "maybe".
955 955
956 956 if ignored:
957 957 ignore = util.never
958 958 dirignore = util.never
959 959 elif unknown:
960 960 ignore = self._ignore
961 961 dirignore = self._dirignore
962 962 else:
963 963 # if not unknown and not ignored, drop dir recursion and step 2
964 964 ignore = util.always
965 965 dirignore = util.always
966 966
967 967 matchfn = match.matchfn
968 968 matchalways = match.always()
969 969 matchtdir = match.traversedir
970 970 dmap = self._map
971 971 listdir = osutil.listdir
972 972 lstat = os.lstat
973 973 dirkind = stat.S_IFDIR
974 974 regkind = stat.S_IFREG
975 975 lnkkind = stat.S_IFLNK
976 976 join = self._join
977 977
978 978 exact = skipstep3 = False
979 979 if match.isexact(): # match.exact
980 980 exact = True
981 981 dirignore = util.always # skip step 2
982 982 elif match.prefix(): # match.match, no patterns
983 983 skipstep3 = True
984 984
985 985 if not exact and self._checkcase:
986 986 normalize = self._normalize
987 987 normalizefile = self._normalizefile
988 988 skipstep3 = False
989 989 else:
990 990 normalize = self._normalize
991 991 normalizefile = None
992 992
993 993 # step 1: find all explicit files
994 994 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
995 995
996 996 skipstep3 = skipstep3 and not (work or dirsnotfound)
997 997 work = [d for d in work if not dirignore(d[0])]
998 998
999 999 # step 2: visit subdirectories
1000 1000 def traverse(work, alreadynormed):
1001 1001 wadd = work.append
1002 1002 while work:
1003 1003 nd = work.pop()
1004 1004 skip = None
1005 1005 if nd == '.':
1006 1006 nd = ''
1007 1007 else:
1008 1008 skip = '.hg'
1009 1009 try:
1010 1010 entries = listdir(join(nd), stat=True, skip=skip)
1011 1011 except OSError as inst:
1012 1012 if inst.errno in (errno.EACCES, errno.ENOENT):
1013 1013 match.bad(self.pathto(nd), inst.strerror)
1014 1014 continue
1015 1015 raise
1016 1016 for f, kind, st in entries:
1017 1017 if normalizefile:
1018 1018 # even though f might be a directory, we're only
1019 1019 # interested in comparing it to files currently in the
1020 1020 # dmap -- therefore normalizefile is enough
1021 1021 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1022 1022 True)
1023 1023 else:
1024 1024 nf = nd and (nd + "/" + f) or f
1025 1025 if nf not in results:
1026 1026 if kind == dirkind:
1027 1027 if not ignore(nf):
1028 1028 if matchtdir:
1029 1029 matchtdir(nf)
1030 1030 wadd(nf)
1031 1031 if nf in dmap and (matchalways or matchfn(nf)):
1032 1032 results[nf] = None
1033 1033 elif kind == regkind or kind == lnkkind:
1034 1034 if nf in dmap:
1035 1035 if matchalways or matchfn(nf):
1036 1036 results[nf] = st
1037 1037 elif ((matchalways or matchfn(nf))
1038 1038 and not ignore(nf)):
1039 1039 # unknown file -- normalize if necessary
1040 1040 if not alreadynormed:
1041 1041 nf = normalize(nf, False, True)
1042 1042 results[nf] = st
1043 1043 elif nf in dmap and (matchalways or matchfn(nf)):
1044 1044 results[nf] = None
1045 1045
1046 1046 for nd, d in work:
1047 1047 # alreadynormed means that processwork doesn't have to do any
1048 1048 # expensive directory normalization
1049 1049 alreadynormed = not normalize or nd == d
1050 1050 traverse([d], alreadynormed)
1051 1051
1052 1052 for s in subrepos:
1053 1053 del results[s]
1054 1054 del results['.hg']
1055 1055
1056 1056 # step 3: visit remaining files from dmap
1057 1057 if not skipstep3 and not exact:
1058 1058 # If a dmap file is not in results yet, it was either
1059 1059 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1060 1060 # symlink directory.
1061 1061 if not results and matchalways:
1062 1062 visit = dmap.keys()
1063 1063 else:
1064 1064 visit = [f for f in dmap if f not in results and matchfn(f)]
1065 1065 visit.sort()
1066 1066
1067 1067 if unknown:
1068 1068 # unknown == True means we walked all dirs under the roots
1069 1069 # that weren't ignored, and everything that matched was stat'ed
1070 1070 # and is already in results.
1071 1071 # The rest must thus be ignored or under a symlink.
1072 1072 audit_path = pathutil.pathauditor(self._root)
1073 1073
1074 1074 for nf in iter(visit):
1075 1075 # If a stat for the same file was already added with a
1076 1076 # different case, don't add one for this, since that would
1077 1077 # make it appear as if the file exists under both names
1078 1078 # on disk.
1079 1079 if (normalizefile and
1080 1080 normalizefile(nf, True, True) in results):
1081 1081 results[nf] = None
1082 1082 # Report ignored items in the dmap as long as they are not
1083 1083 # under a symlink directory.
1084 1084 elif audit_path.check(nf):
1085 1085 try:
1086 1086 results[nf] = lstat(join(nf))
1087 1087 # file was just ignored, no links, and exists
1088 1088 except OSError:
1089 1089 # file doesn't exist
1090 1090 results[nf] = None
1091 1091 else:
1092 1092 # It's either missing or under a symlink directory
1093 1093 # which we in this case report as missing
1094 1094 results[nf] = None
1095 1095 else:
1096 1096 # We may not have walked the full directory tree above,
1097 1097 # so stat and check everything we missed.
1098 1098 nf = iter(visit).next
1099 1099 for st in util.statfiles([join(i) for i in visit]):
1100 1100 results[nf()] = st
1101 1101 return results
1102 1102
1103 1103 def status(self, match, subrepos, ignored, clean, unknown):
1104 1104 '''Determine the status of the working copy relative to the
1105 1105 dirstate and return a pair of (unsure, status), where status is of type
1106 1106 scmutil.status and:
1107 1107
1108 1108 unsure:
1109 1109 files that might have been modified since the dirstate was
1110 1110 written, but need to be read to be sure (size is the same
1111 1111 but mtime differs)
1112 1112 status.modified:
1113 1113 files that have definitely been modified since the dirstate
1114 1114 was written (different size or mode)
1115 1115 status.clean:
1116 1116 files that have definitely not been modified since the
1117 1117 dirstate was written
1118 1118 '''
1119 1119 listignored, listclean, listunknown = ignored, clean, unknown
1120 1120 lookup, modified, added, unknown, ignored = [], [], [], [], []
1121 1121 removed, deleted, clean = [], [], []
1122 1122
1123 1123 dmap = self._map
1124 1124 ladd = lookup.append # aka "unsure"
1125 1125 madd = modified.append
1126 1126 aadd = added.append
1127 1127 uadd = unknown.append
1128 1128 iadd = ignored.append
1129 1129 radd = removed.append
1130 1130 dadd = deleted.append
1131 1131 cadd = clean.append
1132 1132 mexact = match.exact
1133 1133 dirignore = self._dirignore
1134 1134 checkexec = self._checkexec
1135 1135 copymap = self._copymap
1136 1136 lastnormaltime = self._lastnormaltime
1137 1137
1138 1138 # We need to do full walks when either
1139 1139 # - we're listing all clean files, or
1140 1140 # - match.traversedir does something, because match.traversedir should
1141 1141 # be called for every dir in the working dir
1142 1142 full = listclean or match.traversedir is not None
1143 1143 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1144 1144 full=full).iteritems():
1145 1145 if fn not in dmap:
1146 1146 if (listignored or mexact(fn)) and dirignore(fn):
1147 1147 if listignored:
1148 1148 iadd(fn)
1149 1149 else:
1150 1150 uadd(fn)
1151 1151 continue
1152 1152
1153 1153 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1154 1154 # written like that for performance reasons. dmap[fn] is not a
1155 1155 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1156 1156 # opcode has fast paths when the value to be unpacked is a tuple or
1157 1157 # a list, but falls back to creating a full-fledged iterator in
1158 1158 # general. That is much slower than simply accessing and storing the
1159 1159 # tuple members one by one.
1160 1160 t = dmap[fn]
1161 1161 state = t[0]
1162 1162 mode = t[1]
1163 1163 size = t[2]
1164 1164 time = t[3]
1165 1165
1166 1166 if not st and state in "nma":
1167 1167 dadd(fn)
1168 1168 elif state == 'n':
1169 1169 if (size >= 0 and
1170 1170 ((size != st.st_size and size != st.st_size & _rangemask)
1171 1171 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1172 1172 or size == -2 # other parent
1173 1173 or fn in copymap):
1174 1174 madd(fn)
1175 1175 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1176 1176 ladd(fn)
1177 1177 elif st.st_mtime == lastnormaltime:
1178 1178 # fn may have just been marked as normal and it may have
1179 1179 # changed in the same second without changing its size.
1180 1180 # This can happen if we quickly do multiple commits.
1181 1181 # Force lookup, so we don't miss such a racy file change.
1182 1182 ladd(fn)
1183 1183 elif listclean:
1184 1184 cadd(fn)
1185 1185 elif state == 'm':
1186 1186 madd(fn)
1187 1187 elif state == 'a':
1188 1188 aadd(fn)
1189 1189 elif state == 'r':
1190 1190 radd(fn)
1191 1191
1192 1192 return (lookup, scmutil.status(modified, added, removed, deleted,
1193 1193 unknown, ignored, clean))
1194 1194
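Two performance details in the status() loop above are worth spelling out: dmap values are indexed one field at a time because UNPACK_SEQUENCE only has fast paths for real tuples and lists, and the size/mtime comparisons also accept a match modulo _rangemask (defined earlier in dirstate.py as 0x7fffffff) because the on-disk dirstate stores those fields with 31-bit precision. A standalone check of the masking idea (size_matches is an illustrative name):

    _rangemask = 0x7fffffff

    def size_matches(recorded, st_size):
        # accept an exact match, or a match against the truncated value
        # the dirstate may have recorded for files larger than 2 GiB
        return recorded == st_size or recorded == st_size & _rangemask

    assert size_matches(5, 5)
    assert size_matches((2**31 + 5) & _rangemask, 2**31 + 5)  # truncated
    assert not size_matches(6, 5)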
1195 1195 def matches(self, match):
1196 1196 '''
1197 1197 return files in the dirstate (in whatever state) filtered by match
1198 1198 '''
1199 1199 dmap = self._map
1200 1200 if match.always():
1201 1201 return dmap.keys()
1202 1202 files = match.files()
1203 1203 if match.isexact():
1204 1204 # fast path -- filter the other way around, since typically files is
1205 1205 # much smaller than dmap
1206 1206 return [f for f in files if f in dmap]
1207 1207 if match.prefix() and all(fn in dmap for fn in files):
1208 1208 # fast path -- all the values are known to be files, so just return
1209 1209 # that
1210 1210 return list(files)
1211 1211 return [f for f in dmap if match(f)]
1212 1212
1213 1213 def _actualfilename(self, tr):
1214 1214 if tr:
1215 1215 return self._pendingfilename
1216 1216 else:
1217 1217 return self._filename
1218 1218
1219 1219 def savebackup(self, tr, suffix='', prefix=''):
1220 1220 '''Save current dirstate into backup file with suffix'''
1221 1221 assert len(suffix) > 0 or len(prefix) > 0
1222 1222 filename = self._actualfilename(tr)
1223 1223
1224 1224 # use '_writedirstate' instead of 'write' to guarantee the write,
1225 1225 # because the latter skips writing while a transaction is running.
1226 1226 # The output file is used to create the dirstate backup at this point.
1227 1227 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1228 1228 checkambig=True))
1229 1229
1230 1230 if tr:
1231 1231 # ensure that subsequent tr.writepending returns True for
1232 1232 # changes written out above, even if dirstate is never
1233 1233 # changed after this
1234 1234 tr.addfilegenerator('dirstate', (self._filename,),
1235 1235 self._writedirstate, location='plain')
1236 1236
1237 1237 # ensure that pending file written above is unlinked at
1238 1238 # failure, even if tr.writepending isn't invoked until the
1239 1239 # end of this transaction
1240 1240 tr.registertmp(filename, location='plain')
1241 1241
1242 1242 self._opener.write(prefix + self._filename + suffix,
1243 1243 self._opener.tryread(filename))
1244 1244
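savebackup above pairs with restorebackup and clearbackup below. A hedged sketch of the intended lifecycle, assuming repo is a live localrepository with no transaction running and mutate_working_copy is a hypothetical helper:

    repo.dirstate.savebackup(None, suffix='.example')  # snapshot dirstate
    try:
        mutate_working_copy(repo)  # hypothetical mutation
    except Exception:
        repo.dirstate.restorebackup(None, suffix='.example')  # roll back
        raise
    else:
        repo.dirstate.clearbackup(None, suffix='.example')  # drop snapshot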
1245 1245 def restorebackup(self, tr, suffix='', prefix=''):
1246 1246 '''Restore dirstate by backup file with suffix'''
1247 1247 assert len(suffix) > 0 or len(prefix) > 0
1248 1248 # this "invalidate()" prevents "wlock.release()" from writing
1249 1249 # changes of dirstate out after restoring from backup file
1250 1250 self.invalidate()
1251 1251 filename = self._actualfilename(tr)
1252 1252 # using self._filename to avoid having "pending" in the backup filename
1253 1253 self._opener.rename(prefix + self._filename + suffix, filename,
1254 1254 checkambig=True)
1255 1255
1256 1256 def clearbackup(self, tr, suffix='', prefix=''):
1257 1257 '''Clear backup file with suffix'''
1258 1258 assert len(suffix) > 0 or len(prefix) > 0
1259 1259 # using self._filename to avoid having "pending" in the backup filename
1260 1260 self._opener.unlink(prefix + self._filename + suffix)
@@ -1,2004 +1,2004
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 repoview,
52 52 revset,
53 53 scmutil,
54 54 store,
55 55 subrepo,
56 56 tags as tagsmod,
57 57 transaction,
58 58 util,
59 59 )
60 60
61 61 release = lockmod.release
62 62 urlerr = util.urlerr
63 63 urlreq = util.urlreq
64 64
65 65 class repofilecache(scmutil.filecache):
66 66 """All filecache usage on repo are done for logic that should be unfiltered
67 67 """
68 68
69 69 def __get__(self, repo, type=None):
70 70 if repo is None:
71 71 return self
72 72 return super(repofilecache, self).__get__(repo.unfiltered(), type)
73 73 def __set__(self, repo, value):
74 74 return super(repofilecache, self).__set__(repo.unfiltered(), value)
75 75 def __delete__(self, repo):
76 76 return super(repofilecache, self).__delete__(repo.unfiltered())
77 77
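The unfiltered() indirection above is the whole point of repofilecache: every filtered view of the repo shares one cached value. A standalone sketch of the same descriptor-delegation pattern (class and attribute names invented for illustration):

    class unfiltereddelegate(object):
        # store and read the value on obj.unfiltered(), so all filtered
        # views of an object share a single cached attribute
        def __init__(self, name):
            self.name = name
        def __get__(self, obj, type=None):
            if obj is None:
                return self
            return obj.unfiltered().__dict__.get(self.name)
        def __set__(self, obj, value):
            obj.unfiltered().__dict__[self.name] = value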
78 78 class storecache(repofilecache):
79 79 """filecache for files in the store"""
80 80 def join(self, obj, fname):
81 81 return obj.sjoin(fname)
82 82
83 83 class unfilteredpropertycache(util.propertycache):
84 84 """propertycache that apply to unfiltered repo only"""
85 85
86 86 def __get__(self, repo, type=None):
87 87 unfi = repo.unfiltered()
88 88 if unfi is repo:
89 89 return super(unfilteredpropertycache, self).__get__(unfi)
90 90 return getattr(unfi, self.name)
91 91
92 92 class filteredpropertycache(util.propertycache):
93 93 """propertycache that must take filtering in account"""
94 94
95 95 def cachevalue(self, obj, value):
96 96 object.__setattr__(obj, self.name, value)
97 97
98 98
99 99 def hasunfilteredcache(repo, name):
100 100 """check if a repo has an unfilteredpropertycache value for <name>"""
101 101 return name in vars(repo.unfiltered())
102 102
103 103 def unfilteredmethod(orig):
104 104 """decorate method that always need to be run on unfiltered version"""
105 105 def wrapper(repo, *args, **kwargs):
106 106 return orig(repo.unfiltered(), *args, **kwargs)
107 107 return wrapper
108 108
109 109 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
110 110 'unbundle'))
111 111 legacycaps = moderncaps.union(set(['changegroupsubset']))
112 112
113 113 class localpeer(peer.peerrepository):
114 114 '''peer for a local repo; reflects only the most recent API'''
115 115
116 116 def __init__(self, repo, caps=moderncaps):
117 117 peer.peerrepository.__init__(self)
118 118 self._repo = repo.filtered('served')
119 119 self.ui = repo.ui
120 120 self._caps = repo._restrictcapabilities(caps)
121 121 self.requirements = repo.requirements
122 122 self.supportedformats = repo.supportedformats
123 123
124 124 def close(self):
125 125 self._repo.close()
126 126
127 127 def _capabilities(self):
128 128 return self._caps
129 129
130 130 def local(self):
131 131 return self._repo
132 132
133 133 def canpush(self):
134 134 return True
135 135
136 136 def url(self):
137 137 return self._repo.url()
138 138
139 139 def lookup(self, key):
140 140 return self._repo.lookup(key)
141 141
142 142 def branchmap(self):
143 143 return self._repo.branchmap()
144 144
145 145 def heads(self):
146 146 return self._repo.heads()
147 147
148 148 def known(self, nodes):
149 149 return self._repo.known(nodes)
150 150
151 151 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
152 152 **kwargs):
153 153 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
154 154 common=common, bundlecaps=bundlecaps,
155 155 **kwargs)
156 156 cb = util.chunkbuffer(chunks)
157 157
158 158 if bundlecaps is not None and 'HG20' in bundlecaps:
159 159 # When requesting a bundle2, getbundle returns a stream to make the
160 160 # wire level function happier. We need to build a proper object
161 161 # from it in local peer.
162 162 return bundle2.getunbundler(self.ui, cb)
163 163 else:
164 164 return changegroup.getunbundler('01', cb, None)
165 165
166 166 # TODO We might want to move the next two calls into legacypeer and add
167 167 # unbundle instead.
168 168
169 169 def unbundle(self, cg, heads, url):
170 170 """apply a bundle on a repo
171 171
172 172 This function handles the repo locking itself."""
173 173 try:
174 174 try:
175 175 cg = exchange.readbundle(self.ui, cg, None)
176 176 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
177 177 if util.safehasattr(ret, 'getchunks'):
178 178 # This is a bundle20 object, turn it into an unbundler.
179 179 # This little dance should be dropped eventually when the
180 180 # API is finally improved.
181 181 stream = util.chunkbuffer(ret.getchunks())
182 182 ret = bundle2.getunbundler(self.ui, stream)
183 183 return ret
184 184 except Exception as exc:
185 185 # If the exception contains output salvaged from a bundle2
186 186 # reply, we need to make sure it is printed before continuing
187 187 # to fail. So we build a bundle2 with such output and consume
188 188 # it directly.
189 189 #
190 190 # This is not very elegant but allows a "simple" solution for
191 191 # issue4594
192 192 output = getattr(exc, '_bundle2salvagedoutput', ())
193 193 if output:
194 194 bundler = bundle2.bundle20(self._repo.ui)
195 195 for out in output:
196 196 bundler.addpart(out)
197 197 stream = util.chunkbuffer(bundler.getchunks())
198 198 b = bundle2.getunbundler(self.ui, stream)
199 199 bundle2.processbundle(self._repo, b)
200 200 raise
201 201 except error.PushRaced as exc:
202 202 raise error.ResponseError(_('push failed:'), str(exc))
203 203
204 204 def lock(self):
205 205 return self._repo.lock()
206 206
207 207 def addchangegroup(self, cg, source, url):
208 208 return cg.apply(self._repo, source, url)
209 209
210 210 def pushkey(self, namespace, key, old, new):
211 211 return self._repo.pushkey(namespace, key, old, new)
212 212
213 213 def listkeys(self, namespace):
214 214 return self._repo.listkeys(namespace)
215 215
216 216 def debugwireargs(self, one, two, three=None, four=None, five=None):
217 217 '''used to test argument passing over the wire'''
218 218 return "%s %s %s %s %s" % (one, two, three, four, five)
219 219
220 220 class locallegacypeer(localpeer):
221 221 '''peer extension which implements legacy methods too; used for tests with
222 222 restricted capabilities'''
223 223
224 224 def __init__(self, repo):
225 225 localpeer.__init__(self, repo, caps=legacycaps)
226 226
227 227 def branches(self, nodes):
228 228 return self._repo.branches(nodes)
229 229
230 230 def between(self, pairs):
231 231 return self._repo.between(pairs)
232 232
233 233 def changegroup(self, basenodes, source):
234 234 return changegroup.changegroup(self._repo, basenodes, source)
235 235
236 236 def changegroupsubset(self, bases, heads, source):
237 237 return changegroup.changegroupsubset(self._repo, bases, heads, source)
238 238
239 239 class localrepository(object):
240 240
241 241 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
242 242 'manifestv2'))
243 243 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
244 244 'dotencode'))
245 245 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
246 246 filtername = None
247 247
248 248 # a list of (ui, featureset) functions.
249 249 # only functions defined in module of enabled extensions are invoked
250 250 featuresetupfuncs = set()
251 251
252 252 def __init__(self, baseui, path, create=False):
253 253 self.requirements = set()
254 254 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
255 255 self.wopener = self.wvfs
256 256 self.root = self.wvfs.base
257 257 self.path = self.wvfs.join(".hg")
258 258 self.origroot = path
259 259 self.auditor = pathutil.pathauditor(self.root, self._checknested)
260 260 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
261 261 realfs=False)
262 262 self.vfs = scmutil.vfs(self.path)
263 263 self.opener = self.vfs
264 264 self.baseui = baseui
265 265 self.ui = baseui.copy()
266 266 self.ui.copy = baseui.copy # prevent copying repo configuration
267 267 # A list of callback to shape the phase if no data were found.
268 268 # Callback are in the form: func(repo, roots) --> processed root.
269 269 # This list it to be filled by extension during repo setup
270 270 self._phasedefaults = []
271 271 try:
272 272 self.ui.readconfig(self.join("hgrc"), self.root)
273 273 extensions.loadall(self.ui)
274 274 except IOError:
275 275 pass
276 276
277 277 if self.featuresetupfuncs:
278 278 self.supported = set(self._basesupported) # use private copy
279 279 extmods = set(m.__name__ for n, m
280 280 in extensions.extensions(self.ui))
281 281 for setupfunc in self.featuresetupfuncs:
282 282 if setupfunc.__module__ in extmods:
283 283 setupfunc(self.ui, self.supported)
284 284 else:
285 285 self.supported = self._basesupported
286 286
287 287 if not self.vfs.isdir():
288 288 if create:
289 289 self.requirements = newreporequirements(self)
290 290
291 291 if not self.wvfs.exists():
292 292 self.wvfs.makedirs()
293 293 self.vfs.makedir(notindexed=True)
294 294
295 295 if 'store' in self.requirements:
296 296 self.vfs.mkdir("store")
297 297
298 298 # create an invalid changelog
299 299 self.vfs.append(
300 300 "00changelog.i",
301 301 '\0\0\0\2' # represents revlogv2
302 302 ' dummy changelog to prevent using the old repo layout'
303 303 )
304 304 else:
305 305 raise error.RepoError(_("repository %s not found") % path)
306 306 elif create:
307 307 raise error.RepoError(_("repository %s already exists") % path)
308 308 else:
309 309 try:
310 310 self.requirements = scmutil.readrequires(
311 311 self.vfs, self.supported)
312 312 except IOError as inst:
313 313 if inst.errno != errno.ENOENT:
314 314 raise
315 315
316 316 self.sharedpath = self.path
317 317 try:
318 318 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
319 319 realpath=True)
320 320 s = vfs.base
321 321 if not vfs.exists():
322 322 raise error.RepoError(
323 323 _('.hg/sharedpath points to nonexistent directory %s') % s)
324 324 self.sharedpath = s
325 325 except IOError as inst:
326 326 if inst.errno != errno.ENOENT:
327 327 raise
328 328
329 329 self.store = store.store(
330 330 self.requirements, self.sharedpath, scmutil.vfs)
331 331 self.spath = self.store.path
332 332 self.svfs = self.store.vfs
333 333 self.sjoin = self.store.join
334 334 self.vfs.createmode = self.store.createmode
335 335 self._applyopenerreqs()
336 336 if create:
337 337 self._writerequirements()
338 338
339 339 self._dirstatevalidatewarned = False
340 340
341 341 self._branchcaches = {}
342 342 self._revbranchcache = None
343 343 self.filterpats = {}
344 344 self._datafilters = {}
345 345 self._transref = self._lockref = self._wlockref = None
346 346
347 347 # A cache for various files under .hg/ that tracks file changes,
348 348 # (used by the filecache decorator)
349 349 #
350 350 # Maps a property name to its util.filecacheentry
351 351 self._filecache = {}
352 352
353 353 # hold sets of revision to be filtered
354 354 # should be cleared when something might have changed the filter value:
355 355 # - new changesets,
356 356 # - phase change,
357 357 # - new obsolescence marker,
358 358 # - working directory parent change,
359 359 # - bookmark changes
360 360 self.filteredrevcache = {}
361 361
362 362 # generic mapping between names and nodes
363 363 self.names = namespaces.namespaces()
364 364
365 365 def close(self):
366 366 self._writecaches()
367 367
368 368 def _writecaches(self):
369 369 if self._revbranchcache:
370 370 self._revbranchcache.write()
371 371
372 372 def _restrictcapabilities(self, caps):
373 373 if self.ui.configbool('experimental', 'bundle2-advertise', True):
374 374 caps = set(caps)
375 375 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
376 376 caps.add('bundle2=' + urlreq.quote(capsblob))
377 377 return caps
378 378
379 379 def _applyopenerreqs(self):
380 380 self.svfs.options = dict((r, 1) for r in self.requirements
381 381 if r in self.openerreqs)
382 382 # experimental config: format.chunkcachesize
383 383 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
384 384 if chunkcachesize is not None:
385 385 self.svfs.options['chunkcachesize'] = chunkcachesize
386 386 # experimental config: format.maxchainlen
387 387 maxchainlen = self.ui.configint('format', 'maxchainlen')
388 388 if maxchainlen is not None:
389 389 self.svfs.options['maxchainlen'] = maxchainlen
390 390 # experimental config: format.manifestcachesize
391 391 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
392 392 if manifestcachesize is not None:
393 393 self.svfs.options['manifestcachesize'] = manifestcachesize
394 394 # experimental config: format.aggressivemergedeltas
395 395 aggressivemergedeltas = self.ui.configbool('format',
396 396 'aggressivemergedeltas', False)
397 397 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
398 398 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
399 399
400 400 def _writerequirements(self):
401 401 scmutil.writerequires(self.vfs, self.requirements)
402 402
403 403 def _checknested(self, path):
404 404 """Determine if path is a legal nested repository."""
405 405 if not path.startswith(self.root):
406 406 return False
407 407 subpath = path[len(self.root) + 1:]
408 408 normsubpath = util.pconvert(subpath)
409 409
410 410 # XXX: Checking against the current working copy is wrong in
411 411 # the sense that it can reject things like
412 412 #
413 413 # $ hg cat -r 10 sub/x.txt
414 414 #
415 415 # if sub/ is no longer a subrepository in the working copy
416 416 # parent revision.
417 417 #
418 418 # However, it can of course also allow things that would have
419 419 # been rejected before, such as the above cat command if sub/
420 420 # is a subrepository now, but was a normal directory before.
421 421 # The old path auditor would have rejected by mistake since it
422 422 # panics when it sees sub/.hg/.
423 423 #
424 424 # All in all, checking against the working copy seems sensible
425 425 # since we want to prevent access to nested repositories on
426 426 # the filesystem *now*.
427 427 ctx = self[None]
428 428 parts = util.splitpath(subpath)
429 429 while parts:
430 430 prefix = '/'.join(parts)
431 431 if prefix in ctx.substate:
432 432 if prefix == normsubpath:
433 433 return True
434 434 else:
435 435 sub = ctx.sub(prefix)
436 436 return sub.checknested(subpath[len(prefix) + 1:])
437 437 else:
438 438 parts.pop()
439 439 return False
440 440
441 441 def peer(self):
442 442 return localpeer(self) # not cached to avoid reference cycle
443 443
444 444 def unfiltered(self):
445 445 """Return unfiltered version of the repository
446 446
447 447 Intended to be overridden by filtered repo.
448 448 return self
449 449
450 450 def filtered(self, name):
451 451 """Return a filtered version of a repository"""
452 452 # build a new class with the mixin and the current class
453 453 # (possibly subclass of the repo)
454 454 class proxycls(repoview.repoview, self.unfiltered().__class__):
455 455 pass
456 456 return proxycls(self, name)
457 457
458 458 @repofilecache('bookmarks', 'bookmarks.current')
459 459 def _bookmarks(self):
460 460 return bookmarks.bmstore(self)
461 461
462 462 @property
463 463 def _activebookmark(self):
464 464 return self._bookmarks.active
465 465
466 466 def bookmarkheads(self, bookmark):
467 467 name = bookmark.split('@', 1)[0]
468 468 heads = []
469 469 for mark, n in self._bookmarks.iteritems():
470 470 if mark.split('@', 1)[0] == name:
471 471 heads.append(n)
472 472 return heads
473 473
474 474 # _phaserevs and _phasesets depend on changelog. what we need is to
475 475 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
476 476 # can't be easily expressed in filecache mechanism.
477 477 @storecache('phaseroots', '00changelog.i')
478 478 def _phasecache(self):
479 479 return phases.phasecache(self, self._phasedefaults)
480 480
481 481 @storecache('obsstore')
482 482 def obsstore(self):
483 483 # read default format for new obsstore.
484 484 # developer config: format.obsstore-version
485 485 defaultformat = self.ui.configint('format', 'obsstore-version', None)
486 486 # rely on obsstore class default when possible.
487 487 kwargs = {}
488 488 if defaultformat is not None:
489 489 kwargs['defaultformat'] = defaultformat
490 490 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
491 491 store = obsolete.obsstore(self.svfs, readonly=readonly,
492 492 **kwargs)
493 493 if store and readonly:
494 494 self.ui.warn(
495 495 _('obsolete feature not enabled but %i markers found!\n')
496 496 % len(list(store)))
497 497 return store
498 498
499 499 @storecache('00changelog.i')
500 500 def changelog(self):
501 501 c = changelog.changelog(self.svfs)
502 if 'HG_PENDING' in os.environ:
503 p = os.environ['HG_PENDING']
502 if 'HG_PENDING' in encoding.environ:
503 p = encoding.environ['HG_PENDING']
504 504 if p.startswith(self.root):
505 505 c.readpending('00changelog.i.a')
506 506 return c
507 507
508 508 def _constructmanifest(self):
509 509 # This is a temporary function while we migrate from manifest to
510 510 # manifestlog. It allows bundlerepo and unionrepo to intercept the
511 511 # manifest creation.
512 512 return manifest.manifestrevlog(self.svfs)
513 513
514 514 @storecache('00manifest.i')
515 515 def manifestlog(self):
516 516 return manifest.manifestlog(self.svfs, self)
517 517
518 518 @repofilecache('dirstate')
519 519 def dirstate(self):
520 520 return dirstate.dirstate(self.vfs, self.ui, self.root,
521 521 self._dirstatevalidate)
522 522
523 523 def _dirstatevalidate(self, node):
524 524 try:
525 525 self.changelog.rev(node)
526 526 return node
527 527 except error.LookupError:
528 528 if not self._dirstatevalidatewarned:
529 529 self._dirstatevalidatewarned = True
530 530 self.ui.warn(_("warning: ignoring unknown"
531 531 " working parent %s!\n") % short(node))
532 532 return nullid
533 533
534 534 def __getitem__(self, changeid):
535 535 if changeid is None or changeid == wdirrev:
536 536 return context.workingctx(self)
537 537 if isinstance(changeid, slice):
538 538 return [context.changectx(self, i)
539 539 for i in xrange(*changeid.indices(len(self)))
540 540 if i not in self.changelog.filteredrevs]
541 541 return context.changectx(self, changeid)
542 542
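A hedged sketch of the lookup forms __getitem__ above accepts, assuming repo is a live localrepository:

    wctx = repo[None]   # workingctx for the working directory
    ctx = repo['tip']   # symbolic name, resolved by context.changectx
    ctx0 = repo[0]      # revision number
    ctxs = repo[0:5]    # slice: list of changectx, filtered revs skipped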
543 543 def __contains__(self, changeid):
544 544 try:
545 545 self[changeid]
546 546 return True
547 547 except error.RepoLookupError:
548 548 return False
549 549
550 550 def __nonzero__(self):
551 551 return True
552 552
553 553 def __len__(self):
554 554 return len(self.changelog)
555 555
556 556 def __iter__(self):
557 557 return iter(self.changelog)
558 558
559 559 def revs(self, expr, *args):
560 560 '''Find revisions matching a revset.
561 561
562 562 The revset is specified as a string ``expr`` that may contain
563 563 %-formatting to escape certain types. See ``revset.formatspec``.
564 564
565 565 Revset aliases from the configuration are not expanded. To expand
566 566 user aliases, consider calling ``scmutil.revrange()``.
567 567
568 568 Returns a revset.abstractsmartset, which is a list-like interface
569 569 that contains integer revisions.
570 570 '''
571 571 expr = revset.formatspec(expr, *args)
572 572 m = revset.match(None, expr)
573 573 return m(self)
574 574
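A hedged usage sketch for revs() above; the %-codes are interpreted by revset.formatspec, with %s escaping a string argument:

    # integer revision numbers for the heads of the 'default' branch
    heads = list(repo.revs('branch(%s) and head()', 'default'))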
575 575 def set(self, expr, *args):
576 576 '''Find revisions matching a revset and emit changectx instances.
577 577
578 578 This is a convenience wrapper around ``revs()`` that iterates the
579 579 result and is a generator of changectx instances.
580 580
581 581 Revset aliases from the configuration are not expanded. To expand
582 582 user aliases, consider calling ``scmutil.revrange()``.
583 583 '''
584 584 for r in self.revs(expr, *args):
585 585 yield self[r]
586 586
587 587 def url(self):
588 588 return 'file:' + self.root
589 589
590 590 def hook(self, name, throw=False, **args):
591 591 """Call a hook, passing this repo instance.
592 592
593 593 This a convenience method to aid invoking hooks. Extensions likely
594 594 won't call this unless they have registered a custom hook or are
595 595 replacing code that is expected to call a hook.
596 596 """
597 597 return hook.hook(self.ui, self, name, throw, **args)
598 598
599 599 @unfilteredmethod
600 600 def _tag(self, names, node, message, local, user, date, extra=None,
601 601 editor=False):
602 602 if isinstance(names, str):
603 603 names = (names,)
604 604
605 605 branches = self.branchmap()
606 606 for name in names:
607 607 self.hook('pretag', throw=True, node=hex(node), tag=name,
608 608 local=local)
609 609 if name in branches:
610 610 self.ui.warn(_("warning: tag %s conflicts with existing"
611 611 " branch name\n") % name)
612 612
613 613 def writetags(fp, names, munge, prevtags):
614 614 fp.seek(0, 2)
615 615 if prevtags and prevtags[-1] != '\n':
616 616 fp.write('\n')
617 617 for name in names:
618 618 if munge:
619 619 m = munge(name)
620 620 else:
621 621 m = name
622 622
623 623 if (self._tagscache.tagtypes and
624 624 name in self._tagscache.tagtypes):
625 625 old = self.tags().get(name, nullid)
626 626 fp.write('%s %s\n' % (hex(old), m))
627 627 fp.write('%s %s\n' % (hex(node), m))
628 628 fp.close()
629 629
630 630 prevtags = ''
631 631 if local:
632 632 try:
633 633 fp = self.vfs('localtags', 'r+')
634 634 except IOError:
635 635 fp = self.vfs('localtags', 'a')
636 636 else:
637 637 prevtags = fp.read()
638 638
639 639 # local tags are stored in the current charset
640 640 writetags(fp, names, None, prevtags)
641 641 for name in names:
642 642 self.hook('tag', node=hex(node), tag=name, local=local)
643 643 return
644 644
645 645 try:
646 646 fp = self.wfile('.hgtags', 'rb+')
647 647 except IOError as e:
648 648 if e.errno != errno.ENOENT:
649 649 raise
650 650 fp = self.wfile('.hgtags', 'ab')
651 651 else:
652 652 prevtags = fp.read()
653 653
654 654 # committed tags are stored in UTF-8
655 655 writetags(fp, names, encoding.fromlocal, prevtags)
656 656
657 657 fp.close()
658 658
659 659 self.invalidatecaches()
660 660
661 661 if '.hgtags' not in self.dirstate:
662 662 self[None].add(['.hgtags'])
663 663
664 664 m = matchmod.exact(self.root, '', ['.hgtags'])
665 665 tagnode = self.commit(message, user, date, extra=extra, match=m,
666 666 editor=editor)
667 667
668 668 for name in names:
669 669 self.hook('tag', node=hex(node), tag=name, local=local)
670 670
671 671 return tagnode
672 672
673 673 def tag(self, names, node, message, local, user, date, editor=False):
674 674 '''tag a revision with one or more symbolic names.
675 675
676 676 names is a list of strings or, when adding a single tag, names may be a
677 677 string.
678 678
679 679 if local is True, the tags are stored in a per-repository file.
680 680 otherwise, they are stored in the .hgtags file, and a new
681 681 changeset is committed with the change.
682 682
683 683 keyword arguments:
684 684
685 685 local: whether to store tags in non-version-controlled file
686 686 (default False)
687 687
688 688 message: commit message to use if committing
689 689
690 690 user: name of user to use if committing
691 691
692 692 date: date tuple to use if committing'''
693 693
694 694 if not local:
695 695 m = matchmod.exact(self.root, '', ['.hgtags'])
696 696 if any(self.status(match=m, unknown=True, ignored=True)):
697 697 raise error.Abort(_('working copy of .hgtags is changed'),
698 698 hint=_('please commit .hgtags manually'))
699 699
700 700 self.tags() # instantiate the cache
701 701 self._tag(names, node, message, local, user, date, editor=editor)
702 702
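A hedged sketch of driving tag() above from an extension; the tag name and message are invented, and user/date of None fall back to the commit defaults (short comes from mercurial.node, imported at the top of this file):

    node = repo['tip'].node()
    repo.tag(['v1.0'], node,
             'Added tag v1.0 for changeset %s' % short(node),
             local=False, user=None, date=None)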
703 703 @filteredpropertycache
704 704 def _tagscache(self):
705 705 '''Returns a tagscache object that contains various tags related
706 706 caches.'''
707 707
708 708 # This simplifies its cache management by having one decorated
709 709 # function (this one) and the rest simply fetch things from it.
710 710 class tagscache(object):
711 711 def __init__(self):
712 712 # These two define the set of tags for this repository. tags
713 713 # maps tag name to node; tagtypes maps tag name to 'global' or
714 714 # 'local'. (Global tags are defined by .hgtags across all
715 715 # heads, and local tags are defined in .hg/localtags.)
716 716 # They constitute the in-memory cache of tags.
717 717 self.tags = self.tagtypes = None
718 718
719 719 self.nodetagscache = self.tagslist = None
720 720
721 721 cache = tagscache()
722 722 cache.tags, cache.tagtypes = self._findtags()
723 723
724 724 return cache
725 725
726 726 def tags(self):
727 727 '''return a mapping of tag to node'''
728 728 t = {}
729 729 if self.changelog.filteredrevs:
730 730 tags, tt = self._findtags()
731 731 else:
732 732 tags = self._tagscache.tags
733 733 for k, v in tags.iteritems():
734 734 try:
735 735 # ignore tags to unknown nodes
736 736 self.changelog.rev(v)
737 737 t[k] = v
738 738 except (error.LookupError, ValueError):
739 739 pass
740 740 return t
741 741
742 742 def _findtags(self):
743 743 '''Do the hard work of finding tags. Return a pair of dicts
744 744 (tags, tagtypes) where tags maps tag name to node, and tagtypes
745 745 maps tag name to a string like \'global\' or \'local\'.
746 746 Subclasses or extensions are free to add their own tags, but
747 747 should be aware that the returned dicts will be retained for the
748 748 duration of the localrepo object.'''
749 749
750 750 # XXX what tagtype should subclasses/extensions use? Currently
751 751 # mq and bookmarks add tags, but do not set the tagtype at all.
752 752 # Should each extension invent its own tag type? Should there
753 753 # be one tagtype for all such "virtual" tags? Or is the status
754 754 # quo fine?
755 755
756 756 alltags = {} # map tag name to (node, hist)
757 757 tagtypes = {}
758 758
759 759 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
760 760 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
761 761
762 762 # Build the return dicts. Have to re-encode tag names because
763 763 # the tags module always uses UTF-8 (in order not to lose info
764 764 # writing to the cache), but the rest of Mercurial wants them in
765 765 # local encoding.
766 766 tags = {}
767 767 for (name, (node, hist)) in alltags.iteritems():
768 768 if node != nullid:
769 769 tags[encoding.tolocal(name)] = node
770 770 tags['tip'] = self.changelog.tip()
771 771 tagtypes = dict([(encoding.tolocal(name), value)
772 772 for (name, value) in tagtypes.iteritems()])
773 773 return (tags, tagtypes)
774 774
775 775 def tagtype(self, tagname):
776 776 '''
777 777 return the type of the given tag. result can be:
778 778
779 779 'local' : a local tag
780 780 'global' : a global tag
781 781 None : tag does not exist
782 782 '''
783 783
784 784 return self._tagscache.tagtypes.get(tagname)
785 785
786 786 def tagslist(self):
787 787 '''return a list of tags ordered by revision'''
788 788 if not self._tagscache.tagslist:
789 789 l = []
790 790 for t, n in self.tags().iteritems():
791 791 l.append((self.changelog.rev(n), t, n))
792 792 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
793 793
794 794 return self._tagscache.tagslist
795 795
796 796 def nodetags(self, node):
797 797 '''return the tags associated with a node'''
798 798 if not self._tagscache.nodetagscache:
799 799 nodetagscache = {}
800 800 for t, n in self._tagscache.tags.iteritems():
801 801 nodetagscache.setdefault(n, []).append(t)
802 802 for tags in nodetagscache.itervalues():
803 803 tags.sort()
804 804 self._tagscache.nodetagscache = nodetagscache
805 805 return self._tagscache.nodetagscache.get(node, [])
806 806
807 807 def nodebookmarks(self, node):
808 808 """return the list of bookmarks pointing to the specified node"""
809 809 marks = []
810 810 for bookmark, n in self._bookmarks.iteritems():
811 811 if n == node:
812 812 marks.append(bookmark)
813 813 return sorted(marks)
814 814
815 815 def branchmap(self):
816 816 '''returns a dictionary {branch: [branchheads]} with branchheads
817 817 ordered by increasing revision number'''
818 818 branchmap.updatecache(self)
819 819 return self._branchcaches[self.filtername]
820 820
821 821 @unfilteredmethod
822 822 def revbranchcache(self):
823 823 if not self._revbranchcache:
824 824 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
825 825 return self._revbranchcache
826 826
827 827 def branchtip(self, branch, ignoremissing=False):
828 828 '''return the tip node for a given branch
829 829
830 830 If ignoremissing is True, then this method will not raise an error.
831 831 This is helpful for callers that only expect None for a missing branch
832 832 (e.g. namespace).
833 833
834 834 '''
835 835 try:
836 836 return self.branchmap().branchtip(branch)
837 837 except KeyError:
838 838 if not ignoremissing:
839 839 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
840 840 else:
841 841 pass
842 842
843 843 def lookup(self, key):
844 844 return self[key].node()
845 845
846 846 def lookupbranch(self, key, remote=None):
847 847 repo = remote or self
848 848 if key in repo.branchmap():
849 849 return key
850 850
851 851 repo = (remote and remote.local()) and remote or self
852 852 return repo[key].branch()
853 853
854 854 def known(self, nodes):
855 855 cl = self.changelog
856 856 nm = cl.nodemap
857 857 filtered = cl.filteredrevs
858 858 result = []
859 859 for n in nodes:
860 860 r = nm.get(n)
861 861 resp = not (r is None or r in filtered)
862 862 result.append(resp)
863 863 return result
864 864
865 865 def local(self):
866 866 return self
867 867
868 868 def publishing(self):
869 869 # it's safe (and desirable) to trust the publish flag unconditionally
870 870 # so that we don't finalize changes shared between users via ssh or nfs
871 871 return self.ui.configbool('phases', 'publish', True, untrusted=True)
872 872
873 873 def cancopy(self):
874 874 # so statichttprepo's override of local() works
875 875 if not self.local():
876 876 return False
877 877 if not self.publishing():
878 878 return True
879 879 # if publishing we can't copy if there is filtered content
880 880 return not self.filtered('visible').changelog.filteredrevs
881 881
882 882 def shared(self):
883 883 '''the type of shared repository (None if not shared)'''
884 884 if self.sharedpath != self.path:
885 885 return 'store'
886 886 return None
887 887
888 888 def join(self, f, *insidef):
889 889 return self.vfs.join(os.path.join(f, *insidef))
890 890
891 891 def wjoin(self, f, *insidef):
892 892 return self.vfs.reljoin(self.root, f, *insidef)
893 893
894 894 def file(self, f):
895 895 if f[0] == '/':
896 896 f = f[1:]
897 897 return filelog.filelog(self.svfs, f)
898 898
899 899 def changectx(self, changeid):
900 900 return self[changeid]
901 901
902 902 def setparents(self, p1, p2=nullid):
903 903 self.dirstate.beginparentchange()
904 904 copies = self.dirstate.setparents(p1, p2)
905 905 pctx = self[p1]
906 906 if copies:
907 907 # Adjust copy records, the dirstate cannot do it, it
908 908 # requires access to parents manifests. Preserve them
909 909 # only for entries added to first parent.
910 910 for f in copies:
911 911 if f not in pctx and copies[f] in pctx:
912 912 self.dirstate.copy(copies[f], f)
913 913 if p2 == nullid:
914 914 for f, s in sorted(self.dirstate.copies().items()):
915 915 if f not in pctx and s not in pctx:
916 916 self.dirstate.copy(None, f)
917 917 self.dirstate.endparentchange()
918 918
919 919 def filectx(self, path, changeid=None, fileid=None):
920 920 """changeid can be a changeset revision, node, or tag.
921 921 fileid can be a file revision or node."""
922 922 return context.filectx(self, path, changeid, fileid)
923 923
924 924 def getcwd(self):
925 925 return self.dirstate.getcwd()
926 926
927 927 def pathto(self, f, cwd=None):
928 928 return self.dirstate.pathto(f, cwd)
929 929
930 930 def wfile(self, f, mode='r'):
931 931 return self.wvfs(f, mode)
932 932
933 933 def _link(self, f):
934 934 return self.wvfs.islink(f)
935 935
936 936 def _loadfilter(self, filter):
937 937 if filter not in self.filterpats:
938 938 l = []
939 939 for pat, cmd in self.ui.configitems(filter):
940 940 if cmd == '!':
941 941 continue
942 942 mf = matchmod.match(self.root, '', [pat])
943 943 fn = None
944 944 params = cmd
945 945 for name, filterfn in self._datafilters.iteritems():
946 946 if cmd.startswith(name):
947 947 fn = filterfn
948 948 params = cmd[len(name):].lstrip()
949 949 break
950 950 if not fn:
951 951 fn = lambda s, c, **kwargs: util.filter(s, c)
952 952 # Wrap old filters not supporting keyword arguments
953 953 if not inspect.getargspec(fn)[2]:
954 954 oldfn = fn
955 955 fn = lambda s, c, **kwargs: oldfn(s, c)
956 956 l.append((mf, fn, params))
957 957 self.filterpats[filter] = l
958 958 return self.filterpats[filter]
959 959
960 960 def _filter(self, filterpats, filename, data):
961 961 for mf, fn, cmd in filterpats:
962 962 if mf(filename):
963 963 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
964 964 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
965 965 break
966 966
967 967 return data
968 968
969 969 @unfilteredpropertycache
970 970 def _encodefilterpats(self):
971 971 return self._loadfilter('encode')
972 972
973 973 @unfilteredpropertycache
974 974 def _decodefilterpats(self):
975 975 return self._loadfilter('decode')
976 976
977 977 def adddatafilter(self, name, filter):
978 978 self._datafilters[name] = filter
979 979
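A hedged sketch of plugging in a custom data filter: _loadfilter above matches each [encode]/[decode] command against registered names by prefix, and _filter invokes the function with the keyword arguments shown here (the 'upper:' filter is invented for illustration):

    def upperfilter(data, cmd, ui=None, repo=None, filename=None, **kwargs):
        # toy filter: upper-case file data as it passes through
        return data.upper()

    repo.adddatafilter('upper:', upperfilter)
    # an hgrc entry whose command starts with the registered name uses it:
    #   [encode]
    #   **.txt = upper: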
980 980 def wread(self, filename):
981 981 if self._link(filename):
982 982 data = self.wvfs.readlink(filename)
983 983 else:
984 984 data = self.wvfs.read(filename)
985 985 return self._filter(self._encodefilterpats, filename, data)
986 986
987 987 def wwrite(self, filename, data, flags, backgroundclose=False):
988 988 """write ``data`` into ``filename`` in the working directory
989 989
990 990 This returns the length of the written (possibly decoded) data.
991 991 """
992 992 data = self._filter(self._decodefilterpats, filename, data)
993 993 if 'l' in flags:
994 994 self.wvfs.symlink(data, filename)
995 995 else:
996 996 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
997 997 if 'x' in flags:
998 998 self.wvfs.setflags(filename, False, True)
999 999 return len(data)
1000 1000
1001 1001 def wwritedata(self, filename, data):
1002 1002 return self._filter(self._decodefilterpats, filename, data)
1003 1003
1004 1004 def currenttransaction(self):
1005 1005 """return the current transaction or None if non exists"""
1006 1006 if self._transref:
1007 1007 tr = self._transref()
1008 1008 else:
1009 1009 tr = None
1010 1010
1011 1011 if tr and tr.running():
1012 1012 return tr
1013 1013 return None
1014 1014
1015 1015 def transaction(self, desc, report=None):
1016 1016 if (self.ui.configbool('devel', 'all-warnings')
1017 1017 or self.ui.configbool('devel', 'check-locks')):
1018 1018 if self._currentlock(self._lockref) is None:
1019 1019 raise error.ProgrammingError('transaction requires locking')
1020 1020 tr = self.currenttransaction()
1021 1021 if tr is not None:
1022 1022 return tr.nest()
1023 1023
1024 1024 # abort here if the journal already exists
1025 1025 if self.svfs.exists("journal"):
1026 1026 raise error.RepoError(
1027 1027 _("abandoned transaction found"),
1028 1028 hint=_("run 'hg recover' to clean up transaction"))
1029 1029
1030 1030 idbase = "%.40f#%f" % (random.random(), time.time())
1031 1031 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1032 1032 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1033 1033
1034 1034 self._writejournal(desc)
1035 1035 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1036 1036 if report:
1037 1037 rp = report
1038 1038 else:
1039 1039 rp = self.ui.warn
1040 1040 vfsmap = {'plain': self.vfs} # root of .hg/
1041 1041 # we must avoid cyclic reference between repo and transaction.
1042 1042 reporef = weakref.ref(self)
1043 1043 def validate(tr):
1044 1044 """will run pre-closing hooks"""
1045 1045 reporef().hook('pretxnclose', throw=True,
1046 1046 txnname=desc, **tr.hookargs)
1047 1047 def releasefn(tr, success):
1048 1048 repo = reporef()
1049 1049 if success:
1050 1050 # this should be explicitly invoked here, because
1051 1051 # in-memory changes aren't written out when closing the
1052 1052 # transaction, if tr.addfilegenerator (via
1053 1053 # dirstate.write or so) wasn't invoked while the
1054 1054 # transaction was running
1055 1055 repo.dirstate.write(None)
1056 1056 else:
1057 1057 # discard all changes (including ones already written
1058 1058 # out) in this transaction
1059 1059 repo.dirstate.restorebackup(None, prefix='journal.')
1060 1060
1061 1061 repo.invalidate(clearfilecache=True)
1062 1062
1063 1063 tr = transaction.transaction(rp, self.svfs, vfsmap,
1064 1064 "journal",
1065 1065 "undo",
1066 1066 aftertrans(renames),
1067 1067 self.store.createmode,
1068 1068 validator=validate,
1069 1069 releasefn=releasefn)
1070 1070
1071 1071 tr.hookargs['txnid'] = txnid
1072 1072 # note: writing the fncache only during finalize means that the file is
1073 1073 # outdated when running hooks. As fncache is used for streaming clone,
1074 1074 # this is not expected to break anything that happens during the hooks.
1075 1075 tr.addfinalize('flush-fncache', self.store.write)
1076 1076 def txnclosehook(tr2):
1077 1077 """To be run if transaction is successful, will schedule a hook run
1078 1078 """
1079 1079 # Don't reference tr2 in hook() so we don't hold a reference.
1080 1080 # This reduces memory consumption when there are multiple
1081 1081 # transactions per lock. This can likely go away if issue5045
1082 1082 # fixes the function accumulation.
1083 1083 hookargs = tr2.hookargs
1084 1084
1085 1085 def hook():
1086 1086 reporef().hook('txnclose', throw=False, txnname=desc,
1087 1087 **hookargs)
1088 1088 reporef()._afterlock(hook)
1089 1089 tr.addfinalize('txnclose-hook', txnclosehook)
1090 1090 def txnaborthook(tr2):
1091 1091 """To be run if transaction is aborted
1092 1092 """
1093 1093 reporef().hook('txnabort', throw=False, txnname=desc,
1094 1094 **tr2.hookargs)
1095 1095 tr.addabort('txnabort-hook', txnaborthook)
1096 1096 # avoid eager cache invalidation. in-memory data should be identical
1097 1097 # to stored data if transaction has no error.
1098 1098 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1099 1099 self._transref = weakref.ref(tr)
1100 1100 return tr
1101 1101
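The reporef() dance inside transaction() above exists only to break the repo <-> transaction reference cycle. A standalone illustration of the weakref pattern (the immediate collection shown assumes CPython's reference counting):

    import weakref

    class repoish(object):
        pass

    repo = repoish()
    reporef = weakref.ref(repo)  # hooks capture this, not repo itself
    assert reporef() is repo     # dereference while the repo is alive
    del repo                     # last strong reference goes away
    assert reporef() is None     # the weak reference now yields None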
1102 1102 def _journalfiles(self):
1103 1103 return ((self.svfs, 'journal'),
1104 1104 (self.vfs, 'journal.dirstate'),
1105 1105 (self.vfs, 'journal.branch'),
1106 1106 (self.vfs, 'journal.desc'),
1107 1107 (self.vfs, 'journal.bookmarks'),
1108 1108 (self.svfs, 'journal.phaseroots'))
1109 1109
1110 1110 def undofiles(self):
1111 1111 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1112 1112
1113 1113 def _writejournal(self, desc):
1114 1114 self.dirstate.savebackup(None, prefix='journal.')
1115 1115 self.vfs.write("journal.branch",
1116 1116 encoding.fromlocal(self.dirstate.branch()))
1117 1117 self.vfs.write("journal.desc",
1118 1118 "%d\n%s\n" % (len(self), desc))
1119 1119 self.vfs.write("journal.bookmarks",
1120 1120 self.vfs.tryread("bookmarks"))
1121 1121 self.svfs.write("journal.phaseroots",
1122 1122 self.svfs.tryread("phaseroots"))
1123 1123
1124 1124 def recover(self):
1125 1125 with self.lock():
1126 1126 if self.svfs.exists("journal"):
1127 1127 self.ui.status(_("rolling back interrupted transaction\n"))
1128 1128 vfsmap = {'': self.svfs,
1129 1129 'plain': self.vfs,}
1130 1130 transaction.rollback(self.svfs, vfsmap, "journal",
1131 1131 self.ui.warn)
1132 1132 self.invalidate()
1133 1133 return True
1134 1134 else:
1135 1135 self.ui.warn(_("no interrupted transaction available\n"))
1136 1136 return False
1137 1137
1138 1138 def rollback(self, dryrun=False, force=False):
1139 1139 wlock = lock = dsguard = None
1140 1140 try:
1141 1141 wlock = self.wlock()
1142 1142 lock = self.lock()
1143 1143 if self.svfs.exists("undo"):
1144 1144 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1145 1145
1146 1146 return self._rollback(dryrun, force, dsguard)
1147 1147 else:
1148 1148 self.ui.warn(_("no rollback information available\n"))
1149 1149 return 1
1150 1150 finally:
1151 1151 release(dsguard, lock, wlock)
1152 1152
1153 1153 @unfilteredmethod # Until we get smarter cache management
1154 1154 def _rollback(self, dryrun, force, dsguard):
1155 1155 ui = self.ui
1156 1156 try:
1157 1157 args = self.vfs.read('undo.desc').splitlines()
1158 1158 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1159 1159 if len(args) >= 3:
1160 1160 detail = args[2]
1161 1161 oldtip = oldlen - 1
1162 1162
1163 1163 if detail and ui.verbose:
1164 1164 msg = (_('repository tip rolled back to revision %s'
1165 1165 ' (undo %s: %s)\n')
1166 1166 % (oldtip, desc, detail))
1167 1167 else:
1168 1168 msg = (_('repository tip rolled back to revision %s'
1169 1169 ' (undo %s)\n')
1170 1170 % (oldtip, desc))
1171 1171 except IOError:
1172 1172 msg = _('rolling back unknown transaction\n')
1173 1173 desc = None
1174 1174
1175 1175 if not force and self['.'] != self['tip'] and desc == 'commit':
1176 1176 raise error.Abort(
1177 1177 _('rollback of last commit while not checked out '
1178 1178 'may lose data'), hint=_('use -f to force'))
1179 1179
1180 1180 ui.status(msg)
1181 1181 if dryrun:
1182 1182 return 0
1183 1183
1184 1184 parents = self.dirstate.parents()
1185 1185 self.destroying()
1186 1186 vfsmap = {'plain': self.vfs, '': self.svfs}
1187 1187 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1188 1188 if self.vfs.exists('undo.bookmarks'):
1189 1189 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1190 1190 if self.svfs.exists('undo.phaseroots'):
1191 1191 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1192 1192 self.invalidate()
1193 1193
1194 1194 parentgone = (parents[0] not in self.changelog.nodemap or
1195 1195 parents[1] not in self.changelog.nodemap)
1196 1196 if parentgone:
1197 1197 # prevent dirstateguard from overwriting already restored one
1198 1198 dsguard.close()
1199 1199
1200 1200 self.dirstate.restorebackup(None, prefix='undo.')
1201 1201 try:
1202 1202 branch = self.vfs.read('undo.branch')
1203 1203 self.dirstate.setbranch(encoding.tolocal(branch))
1204 1204 except IOError:
1205 1205 ui.warn(_('named branch could not be reset: '
1206 1206 'current branch is still \'%s\'\n')
1207 1207 % self.dirstate.branch())
1208 1208
1209 1209 parents = tuple([p.rev() for p in self[None].parents()])
1210 1210 if len(parents) > 1:
1211 1211 ui.status(_('working directory now based on '
1212 1212 'revisions %d and %d\n') % parents)
1213 1213 else:
1214 1214 ui.status(_('working directory now based on '
1215 1215 'revision %d\n') % parents)
1216 1216 mergemod.mergestate.clean(self, self['.'].node())
1217 1217
1218 1218 # TODO: if we know which new heads may result from this rollback, pass
1219 1219 # them to destroy(), which will prevent the branchhead cache from being
1220 1220 # invalidated.
1221 1221 self.destroyed()
1222 1222 return 0
1223 1223
1224 1224 def invalidatecaches(self):
1225 1225
1226 1226 if '_tagscache' in vars(self):
1227 1227 # can't use delattr on proxy
1228 1228 del self.__dict__['_tagscache']
1229 1229
1230 1230 self.unfiltered()._branchcaches.clear()
1231 1231 self.invalidatevolatilesets()
1232 1232
1233 1233 def invalidatevolatilesets(self):
1234 1234 self.filteredrevcache.clear()
1235 1235 obsolete.clearobscaches(self)
1236 1236
1237 1237 def invalidatedirstate(self):
1238 1238 '''Invalidates the dirstate, causing the next call to dirstate
1239 1239 to check if it was modified since the last time it was read,
1240 1240 rereading it if it has.
1241 1241
1242 1242 This is different from dirstate.invalidate() in that it doesn't
1243 1243 always reread the dirstate. Use dirstate.invalidate() if you want to
1244 1244 explicitly read the dirstate again (i.e. restoring it to a previous
1245 1245 known good state).'''
1246 1246 if hasunfilteredcache(self, 'dirstate'):
1247 1247 for k in self.dirstate._filecache:
1248 1248 try:
1249 1249 delattr(self.dirstate, k)
1250 1250 except AttributeError:
1251 1251 pass
1252 1252 delattr(self.unfiltered(), 'dirstate')
1253 1253
1254 1254 def invalidate(self, clearfilecache=False):
1255 1255 '''Invalidates both store and non-store parts other than dirstate
1256 1256
1257 1257 If a transaction is running, invalidation of store is omitted,
1258 1258 because discarding in-memory changes might cause inconsistency
1259 1259 (e.g. an incomplete fncache causes unintentional failure, but
1260 1260 a redundant one doesn't).
1261 1261 '''
1262 1262 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1263 1263 for k in self._filecache.keys():
1264 1264 # dirstate is invalidated separately in invalidatedirstate()
1265 1265 if k == 'dirstate':
1266 1266 continue
1267 1267
1268 1268 if clearfilecache:
1269 1269 del self._filecache[k]
1270 1270 try:
1271 1271 delattr(unfiltered, k)
1272 1272 except AttributeError:
1273 1273 pass
1274 1274 self.invalidatecaches()
1275 1275 if not self.currenttransaction():
1276 1276 # TODO: Changing contents of store outside transaction
1277 1277 # causes inconsistency. We should make in-memory store
1278 1278 # changes detectable, and abort if changed.
1279 1279 self.store.invalidatecaches()
1280 1280
1281 1281 def invalidateall(self):
1282 1282 '''Fully invalidates both store and non-store parts, causing the
1283 1283 subsequent operation to reread any outside changes.'''
1284 1284 # extension should hook this to invalidate its caches
1285 1285 self.invalidate()
1286 1286 self.invalidatedirstate()
1287 1287
1288 1288 @unfilteredmethod
1289 1289 def _refreshfilecachestats(self, tr):
1290 1290 """Reload stats of cached files so that they are flagged as valid"""
1291 1291 for k, ce in self._filecache.items():
1292 1292 if k == 'dirstate' or k not in self.__dict__:
1293 1293 continue
1294 1294 ce.refresh()
1295 1295
1296 1296 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1297 1297 inheritchecker=None, parentenvvar=None):
1298 1298 parentlock = None
1299 1299 # the contents of parentenvvar are used by the underlying lock to
1300 1300 # determine whether it can be inherited
1301 1301 if parentenvvar is not None:
1302 parentlock = os.environ.get(parentenvvar)
1302 parentlock = encoding.environ.get(parentenvvar)
1303 1303 try:
1304 1304 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1305 1305 acquirefn=acquirefn, desc=desc,
1306 1306 inheritchecker=inheritchecker,
1307 1307 parentlock=parentlock)
1308 1308 except error.LockHeld as inst:
1309 1309 if not wait:
1310 1310 raise
1311 1311 # show more details for new-style locks
1312 1312 if ':' in inst.locker:
1313 1313 host, pid = inst.locker.split(":", 1)
1314 1314 self.ui.warn(
1315 1315 _("waiting for lock on %s held by process %r "
1316 1316 "on host %r\n") % (desc, pid, host))
1317 1317 else:
1318 1318 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1319 1319 (desc, inst.locker))
1320 1320 # default to 600 seconds timeout
1321 1321 l = lockmod.lock(vfs, lockname,
1322 1322 int(self.ui.config("ui", "timeout", "600")),
1323 1323 releasefn=releasefn, acquirefn=acquirefn,
1324 1324 desc=desc)
1325 1325 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1326 1326 return l
1327 1327
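_lock above attempts a non-blocking acquisition first and only falls back to a bounded wait when the lock is held. A standalone sketch of that two-step shape (LockHeld and trylock stand in for error.LockHeld and lockmod.lock):

    class LockHeld(Exception):
        pass

    def acquire(trylock, wait, timeout=600):
        try:
            return trylock(0)  # timeout 0: fail immediately if held
        except LockHeld:
            if not wait:
                raise          # caller asked for non-blocking behavior
            # warn the user here, then retry with the bounded timeout
            return trylock(timeout)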
1328 1328 def _afterlock(self, callback):
1329 1329 """add a callback to be run when the repository is fully unlocked
1330 1330
1331 1331 The callback will be executed when the outermost lock is released
1332 1332 (with wlock being higher level than 'lock')."""
1333 1333 for ref in (self._wlockref, self._lockref):
1334 1334 l = ref and ref()
1335 1335 if l and l.held:
1336 1336 l.postrelease.append(callback)
1337 1337 break
1338 1338 else: # no lock has been found.
1339 1339 callback()
1340 1340
1341 1341 def lock(self, wait=True):
1342 1342 '''Lock the repository store (.hg/store) and return a weak reference
1343 1343 to the lock. Use this before modifying the store (e.g. committing or
1344 1344 stripping). If you are opening a transaction, get a lock as well.
1345 1345
1346 1346 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1347 1347 'wlock' first to avoid a dead-lock hazard.'''
1348 1348 l = self._currentlock(self._lockref)
1349 1349 if l is not None:
1350 1350 l.lock()
1351 1351 return l
1352 1352
1353 1353 l = self._lock(self.svfs, "lock", wait, None,
1354 1354 self.invalidate, _('repository %s') % self.origroot)
1355 1355 self._lockref = weakref.ref(l)
1356 1356 return l
1357 1357
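A hedged sketch of the acquisition order the docstring above prescribes, using the release() helper imported near the top of this file; the transaction description is invented:

    wlock = lock = tr = None
    try:
        wlock = repo.wlock()   # always take wlock before lock
        lock = repo.lock()
        tr = repo.transaction('example')
        # ... mutate the store and working directory here ...
        tr.close()             # commit the transaction
    finally:
        release(tr, lock, wlock)  # release inner-to-outer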
1358 1358 def _wlockchecktransaction(self):
1359 1359 if self.currenttransaction() is not None:
1360 1360 raise error.LockInheritanceContractViolation(
1361 1361 'wlock cannot be inherited in the middle of a transaction')
1362 1362
1363 1363 def wlock(self, wait=True):
1364 1364 '''Lock the non-store parts of the repository (everything under
1365 1365 .hg except .hg/store) and return a weak reference to the lock.
1366 1366
1367 1367 Use this before modifying files in .hg.
1368 1368
1369 1369 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1370 1370 'wlock' first to avoid a dead-lock hazard.'''
1371 1371 l = self._wlockref and self._wlockref()
1372 1372 if l is not None and l.held:
1373 1373 l.lock()
1374 1374 return l
1375 1375
1376 1376 # We do not need to check for non-waiting lock acquisition. Such
1377 1377 # acquisition would not cause dead-lock as they would just fail.
1378 1378 if wait and (self.ui.configbool('devel', 'all-warnings')
1379 1379 or self.ui.configbool('devel', 'check-locks')):
1380 1380 if self._currentlock(self._lockref) is not None:
1381 1381 self.ui.develwarn('"wlock" acquired after "lock"')
1382 1382
1383 1383 def unlock():
1384 1384 if self.dirstate.pendingparentchange():
1385 1385 self.dirstate.invalidate()
1386 1386 else:
1387 1387 self.dirstate.write(None)
1388 1388
1389 1389 self._filecache['dirstate'].refresh()
1390 1390
1391 1391 l = self._lock(self.vfs, "wlock", wait, unlock,
1392 1392 self.invalidatedirstate, _('working directory of %s') %
1393 1393 self.origroot,
1394 1394 inheritchecker=self._wlockchecktransaction,
1395 1395 parentenvvar='HG_WLOCK_LOCKER')
1396 1396 self._wlockref = weakref.ref(l)
1397 1397 return l
1398 1398
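As a sketch of the acquisition order both docstrings prescribe, mirroring the try/finally pattern used by commit() further down in this file (`repo` assumed in scope):

# Always acquire wlock before lock to avoid the dead-lock hazard.
wlock = lock = None
try:
    wlock = repo.wlock()
    lock = repo.lock()
    # ... modify the store and the working directory ...
finally:
    lockmod.release(lock, wlock)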
1399 1399 def _currentlock(self, lockref):
1400 1400 """Returns the lock if it's held, or None if it's not."""
1401 1401 if lockref is None:
1402 1402 return None
1403 1403 l = lockref()
1404 1404 if l is None or not l.held:
1405 1405 return None
1406 1406 return l
1407 1407
1408 1408 def currentwlock(self):
1409 1409 """Returns the wlock if it's held, or None if it's not."""
1410 1410 return self._currentlock(self._wlockref)
1411 1411
1412 1412 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1413 1413 """
1414 1414 commit an individual file as part of a larger transaction
1415 1415 """
1416 1416
1417 1417 fname = fctx.path()
1418 1418 fparent1 = manifest1.get(fname, nullid)
1419 1419 fparent2 = manifest2.get(fname, nullid)
1420 1420 if isinstance(fctx, context.filectx):
1421 1421 node = fctx.filenode()
1422 1422 if node in [fparent1, fparent2]:
1423 1423 self.ui.debug('reusing %s filelog entry\n' % fname)
1424 1424 if manifest1.flags(fname) != fctx.flags():
1425 1425 changelist.append(fname)
1426 1426 return node
1427 1427
1428 1428 flog = self.file(fname)
1429 1429 meta = {}
1430 1430 copy = fctx.renamed()
1431 1431 if copy and copy[0] != fname:
1432 1432 # Mark the new revision of this file as a copy of another
1433 1433 # file. This copy data will effectively act as a parent
1434 1434 # of this new revision. If this is a merge, the first
1435 1435 # parent will be the nullid (meaning "look up the copy data")
1436 1436 # and the second one will be the other parent. For example:
1437 1437 #
1438 1438 # 0 --- 1 --- 3 rev1 changes file foo
1439 1439 # \ / rev2 renames foo to bar and changes it
1440 1440 # \- 2 -/ rev3 should have bar with all changes and
1441 1441 # should record that bar descends from
1442 1442 # bar in rev2 and foo in rev1
1443 1443 #
1444 1444 # this allows this merge to succeed:
1445 1445 #
1446 1446 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1447 1447 # \ / merging rev3 and rev4 should use bar@rev2
1448 1448 # \- 2 --- 4 as the merge base
1449 1449 #
1450 1450
1451 1451 cfname = copy[0]
1452 1452 crev = manifest1.get(cfname)
1453 1453 newfparent = fparent2
1454 1454
1455 1455 if manifest2: # branch merge
1456 1456 if fparent2 == nullid or crev is None: # copied on remote side
1457 1457 if cfname in manifest2:
1458 1458 crev = manifest2[cfname]
1459 1459 newfparent = fparent1
1460 1460
1461 1461 # Here, we used to search backwards through history to try to find
1462 1462 # where the file copy came from if the source of a copy was not in
1463 1463 # the parent directory. However, this doesn't actually make sense to
1464 1464 # do (what does a copy from something not in your working copy even
1465 1465 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1466 1466 # the user that copy information was dropped, so if they didn't
1467 1467 # expect this outcome it can be fixed, but this is the correct
1468 1468 # behavior in this circumstance.
1469 1469
1470 1470 if crev:
1471 1471 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1472 1472 meta["copy"] = cfname
1473 1473 meta["copyrev"] = hex(crev)
1474 1474 fparent1, fparent2 = nullid, newfparent
1475 1475 else:
1476 1476 self.ui.warn(_("warning: can't find ancestor for '%s' "
1477 1477 "copied from '%s'!\n") % (fname, cfname))
1478 1478
1479 1479 elif fparent1 == nullid:
1480 1480 fparent1, fparent2 = fparent2, nullid
1481 1481 elif fparent2 != nullid:
1482 1482 # is one parent an ancestor of the other?
1483 1483 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1484 1484 if fparent1 in fparentancestors:
1485 1485 fparent1, fparent2 = fparent2, nullid
1486 1486 elif fparent2 in fparentancestors:
1487 1487 fparent2 = nullid
1488 1488
1489 1489 # is the file changed?
1490 1490 text = fctx.data()
1491 1491 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1492 1492 changelist.append(fname)
1493 1493 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1494 1494 # are just the flags changed during merge?
1495 1495 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1496 1496 changelist.append(fname)
1497 1497
1498 1498 return fparent1
1499 1499
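For reference, a sketch of the copy metadata that _filecommit records for a rename; the source path and hash are hypothetical values:

# Filelog metadata for a file renamed from 'foo'; fparent1 is set to
# nullid, which means "look up the copy data" (see the diagram above).
meta = {
    'copy': 'foo',          # hypothetical source path
    'copyrev': 'a3c91f',    # hypothetical hex filelog node of the source
}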
1500 1500 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1501 1501 """check for commit arguments that aren't committable"""
1502 1502 if match.isexact() or match.prefix():
1503 1503 matched = set(status.modified + status.added + status.removed)
1504 1504
1505 1505 for f in match.files():
1506 1506 f = self.dirstate.normalize(f)
1507 1507 if f == '.' or f in matched or f in wctx.substate:
1508 1508 continue
1509 1509 if f in status.deleted:
1510 1510 fail(f, _('file not found!'))
1511 1511 if f in vdirs: # visited directory
1512 1512 d = f + '/'
1513 1513 for mf in matched:
1514 1514 if mf.startswith(d):
1515 1515 break
1516 1516 else:
1517 1517 fail(f, _("no match under directory!"))
1518 1518 elif f not in self.dirstate:
1519 1519 fail(f, _("file not tracked!"))
1520 1520
1521 1521 @unfilteredmethod
1522 1522 def commit(self, text="", user=None, date=None, match=None, force=False,
1523 1523 editor=False, extra=None):
1524 1524 """Add a new revision to current repository.
1525 1525
1526 1526 Revision information is gathered from the working directory,
1527 1527 match can be used to filter the committed files. If editor is
1528 1528 supplied, it is called to get a commit message.
1529 1529 """
1530 1530 if extra is None:
1531 1531 extra = {}
1532 1532
1533 1533 def fail(f, msg):
1534 1534 raise error.Abort('%s: %s' % (f, msg))
1535 1535
1536 1536 if not match:
1537 1537 match = matchmod.always(self.root, '')
1538 1538
1539 1539 if not force:
1540 1540 vdirs = []
1541 1541 match.explicitdir = vdirs.append
1542 1542 match.bad = fail
1543 1543
1544 1544 wlock = lock = tr = None
1545 1545 try:
1546 1546 wlock = self.wlock()
1547 1547 lock = self.lock() # for recent changelog (see issue4368)
1548 1548
1549 1549 wctx = self[None]
1550 1550 merge = len(wctx.parents()) > 1
1551 1551
1552 1552 if not force and merge and match.ispartial():
1553 1553 raise error.Abort(_('cannot partially commit a merge '
1554 1554 '(do not specify files or patterns)'))
1555 1555
1556 1556 status = self.status(match=match, clean=force)
1557 1557 if force:
1558 1558 status.modified.extend(status.clean) # mq may commit clean files
1559 1559
1560 1560 # check subrepos
1561 1561 subs = []
1562 1562 commitsubs = set()
1563 1563 newstate = wctx.substate.copy()
1564 1564 # only manage subrepos and .hgsubstate if .hgsub is present
1565 1565 if '.hgsub' in wctx:
1566 1566 # we'll decide whether to track this ourselves, thanks
1567 1567 for c in status.modified, status.added, status.removed:
1568 1568 if '.hgsubstate' in c:
1569 1569 c.remove('.hgsubstate')
1570 1570
1571 1571 # compare current state to last committed state
1572 1572 # build new substate based on last committed state
1573 1573 oldstate = wctx.p1().substate
1574 1574 for s in sorted(newstate.keys()):
1575 1575 if not match(s):
1576 1576 # ignore working copy, use old state if present
1577 1577 if s in oldstate:
1578 1578 newstate[s] = oldstate[s]
1579 1579 continue
1580 1580 if not force:
1581 1581 raise error.Abort(
1582 1582 _("commit with new subrepo %s excluded") % s)
1583 1583 dirtyreason = wctx.sub(s).dirtyreason(True)
1584 1584 if dirtyreason:
1585 1585 if not self.ui.configbool('ui', 'commitsubrepos'):
1586 1586 raise error.Abort(dirtyreason,
1587 1587 hint=_("use --subrepos for recursive commit"))
1588 1588 subs.append(s)
1589 1589 commitsubs.add(s)
1590 1590 else:
1591 1591 bs = wctx.sub(s).basestate()
1592 1592 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1593 1593 if oldstate.get(s, (None, None, None))[1] != bs:
1594 1594 subs.append(s)
1595 1595
1596 1596 # check for removed subrepos
1597 1597 for p in wctx.parents():
1598 1598 r = [s for s in p.substate if s not in newstate]
1599 1599 subs += [s for s in r if match(s)]
1600 1600 if subs:
1601 1601 if (not match('.hgsub') and
1602 1602 '.hgsub' in (wctx.modified() + wctx.added())):
1603 1603 raise error.Abort(
1604 1604 _("can't commit subrepos without .hgsub"))
1605 1605 status.modified.insert(0, '.hgsubstate')
1606 1606
1607 1607 elif '.hgsub' in status.removed:
1608 1608 # clean up .hgsubstate when .hgsub is removed
1609 1609 if ('.hgsubstate' in wctx and
1610 1610 '.hgsubstate' not in (status.modified + status.added +
1611 1611 status.removed)):
1612 1612 status.removed.insert(0, '.hgsubstate')
1613 1613
1614 1614 # make sure all explicit patterns are matched
1615 1615 if not force:
1616 1616 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1617 1617
1618 1618 cctx = context.workingcommitctx(self, status,
1619 1619 text, user, date, extra)
1620 1620
1621 1621 # internal config: ui.allowemptycommit
1622 1622 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1623 1623 or extra.get('close') or merge or cctx.files()
1624 1624 or self.ui.configbool('ui', 'allowemptycommit'))
1625 1625 if not allowemptycommit:
1626 1626 return None
1627 1627
1628 1628 if merge and cctx.deleted():
1629 1629 raise error.Abort(_("cannot commit merge with missing files"))
1630 1630
1631 1631 ms = mergemod.mergestate.read(self)
1632 1632 mergeutil.checkunresolved(ms)
1633 1633
1634 1634 if editor:
1635 1635 cctx._text = editor(self, cctx, subs)
1636 1636 edited = (text != cctx._text)
1637 1637
1638 1638 # Save commit message in case this transaction gets rolled back
1639 1639 # (e.g. by a pretxncommit hook). Leave the content alone on
1640 1640 # the assumption that the user will use the same editor again.
1641 1641 msgfn = self.savecommitmessage(cctx._text)
1642 1642
1643 1643 # commit subs and write new state
1644 1644 if subs:
1645 1645 for s in sorted(commitsubs):
1646 1646 sub = wctx.sub(s)
1647 1647 self.ui.status(_('committing subrepository %s\n') %
1648 1648 subrepo.subrelpath(sub))
1649 1649 sr = sub.commit(cctx._text, user, date)
1650 1650 newstate[s] = (newstate[s][0], sr)
1651 1651 subrepo.writestate(self, newstate)
1652 1652
1653 1653 p1, p2 = self.dirstate.parents()
1654 1654 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1655 1655 try:
1656 1656 self.hook("precommit", throw=True, parent1=hookp1,
1657 1657 parent2=hookp2)
1658 1658 tr = self.transaction('commit')
1659 1659 ret = self.commitctx(cctx, True)
1660 1660 except: # re-raises
1661 1661 if edited:
1662 1662 self.ui.write(
1663 1663 _('note: commit message saved in %s\n') % msgfn)
1664 1664 raise
1665 1665 # update bookmarks, dirstate and mergestate
1666 1666 bookmarks.update(self, [p1, p2], ret)
1667 1667 cctx.markcommitted(ret)
1668 1668 ms.reset()
1669 1669 tr.close()
1670 1670
1671 1671 finally:
1672 1672 lockmod.release(tr, lock, wlock)
1673 1673
1674 1674 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1675 1675 # hack for commands that use a temporary commit (e.g. histedit):
1676 1676 # the temporary commit may have been stripped before the hook runs
1677 1677 if self.changelog.hasnode(ret):
1678 1678 self.hook("commit", node=node, parent1=parent1,
1679 1679 parent2=parent2)
1680 1680 self._afterlock(commithook)
1681 1681 return ret
1682 1682
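A minimal caller sketch (message and user are hypothetical, `repo` assumed in scope); note that commit() returns None when there is nothing to commit and empty commits are not allowed:

# Hypothetical usage: commit everything that is modified.
node = repo.commit(text='fix encoding issue', user='alice <a@example.com>')
if node is None:
    repo.ui.status('nothing changed\n')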
1683 1683 @unfilteredmethod
1684 1684 def commitctx(self, ctx, error=False):
1685 1685 """Add a new revision to current repository.
1686 1686 Revision information is passed via the context argument.
1687 1687 """
1688 1688
1689 1689 tr = None
1690 1690 p1, p2 = ctx.p1(), ctx.p2()
1691 1691 user = ctx.user()
1692 1692
1693 1693 lock = self.lock()
1694 1694 try:
1695 1695 tr = self.transaction("commit")
1696 1696 trp = weakref.proxy(tr)
1697 1697
1698 1698 if ctx.manifestnode():
1699 1699 # reuse an existing manifest revision
1700 1700 mn = ctx.manifestnode()
1701 1701 files = ctx.files()
1702 1702 elif ctx.files():
1703 1703 m1ctx = p1.manifestctx()
1704 1704 m2ctx = p2.manifestctx()
1705 1705 mctx = m1ctx.copy()
1706 1706
1707 1707 m = mctx.read()
1708 1708 m1 = m1ctx.read()
1709 1709 m2 = m2ctx.read()
1710 1710
1711 1711 # check in files
1712 1712 added = []
1713 1713 changed = []
1714 1714 removed = list(ctx.removed())
1715 1715 linkrev = len(self)
1716 1716 self.ui.note(_("committing files:\n"))
1717 1717 for f in sorted(ctx.modified() + ctx.added()):
1718 1718 self.ui.note(f + "\n")
1719 1719 try:
1720 1720 fctx = ctx[f]
1721 1721 if fctx is None:
1722 1722 removed.append(f)
1723 1723 else:
1724 1724 added.append(f)
1725 1725 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1726 1726 trp, changed)
1727 1727 m.setflag(f, fctx.flags())
1728 1728 except OSError as inst:
1729 1729 self.ui.warn(_("trouble committing %s!\n") % f)
1730 1730 raise
1731 1731 except IOError as inst:
1732 1732 errcode = getattr(inst, 'errno', errno.ENOENT)
1733 1733 if error or errcode and errcode != errno.ENOENT:
1734 1734 self.ui.warn(_("trouble committing %s!\n") % f)
1735 1735 raise
1736 1736
1737 1737 # update manifest
1738 1738 self.ui.note(_("committing manifest\n"))
1739 1739 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1740 1740 drop = [f for f in removed if f in m]
1741 1741 for f in drop:
1742 1742 del m[f]
1743 1743 mn = mctx.write(trp, linkrev,
1744 1744 p1.manifestnode(), p2.manifestnode(),
1745 1745 added, drop)
1746 1746 files = changed + removed
1747 1747 else:
1748 1748 mn = p1.manifestnode()
1749 1749 files = []
1750 1750
1751 1751 # update changelog
1752 1752 self.ui.note(_("committing changelog\n"))
1753 1753 self.changelog.delayupdate(tr)
1754 1754 n = self.changelog.add(mn, files, ctx.description(),
1755 1755 trp, p1.node(), p2.node(),
1756 1756 user, ctx.date(), ctx.extra().copy())
1757 1757 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1758 1758 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1759 1759 parent2=xp2)
1760 1760 # set the new commit in its proper phase
1761 1761 targetphase = subrepo.newcommitphase(self.ui, ctx)
1762 1762 if targetphase:
1763 1763 # retract boundary does not alter parent changesets.
1764 1764 # if a parent has a higher phase, the resulting phase will
1765 1765 # be compliant anyway
1766 1766 #
1767 1767 # if minimal phase was 0 we don't need to retract anything
1768 1768 phases.retractboundary(self, tr, targetphase, [n])
1769 1769 tr.close()
1770 1770 branchmap.updatecache(self.filtered('served'))
1771 1771 return n
1772 1772 finally:
1773 1773 if tr:
1774 1774 tr.release()
1775 1775 lock.release()
1776 1776
1777 1777 @unfilteredmethod
1778 1778 def destroying(self):
1779 1779 '''Inform the repository that nodes are about to be destroyed.
1780 1780 Intended for use by strip and rollback, so there's a common
1781 1781 place for anything that has to be done before destroying history.
1782 1782
1783 1783 This is mostly useful for saving state that is in memory and waiting
1784 1784 to be flushed when the current lock is released. Because a call to
1785 1785 destroyed is imminent, the repo will be invalidated causing those
1786 1786 changes to stay in memory (waiting for the next unlock), or vanish
1787 1787 completely.
1788 1788 '''
1789 1789 # When using the same lock to commit and strip, the phasecache is left
1790 1790 # dirty after committing. Then when we strip, the repo is invalidated,
1791 1791 # causing those changes to disappear.
1792 1792 if '_phasecache' in vars(self):
1793 1793 self._phasecache.write()
1794 1794
1795 1795 @unfilteredmethod
1796 1796 def destroyed(self):
1797 1797 '''Inform the repository that nodes have been destroyed.
1798 1798 Intended for use by strip and rollback, so there's a common
1799 1799 place for anything that has to be done after destroying history.
1800 1800 '''
1801 1801 # When one tries to:
1802 1802 # 1) destroy nodes thus calling this method (e.g. strip)
1803 1803 # 2) use phasecache somewhere (e.g. commit)
1804 1804 #
1805 1805 # then 2) will fail because the phasecache contains nodes that were
1806 1806 # removed. We can either remove phasecache from the filecache,
1807 1807 # causing it to reload next time it is accessed, or simply filter
1808 1808 # the removed nodes now and write the updated cache.
1809 1809 self._phasecache.filterunknown(self)
1810 1810 self._phasecache.write()
1811 1811
1812 1812 # update the 'served' branch cache to help read-only server processes.
1813 1813 # Thanks to branchcache collaboration this is done from the nearest
1814 1814 # filtered subset and it is expected to be fast.
1815 1815 branchmap.updatecache(self.filtered('served'))
1816 1816
1817 1817 # Ensure the persistent tag cache is updated. Doing it now
1818 1818 # means that the tag cache only has to worry about destroyed
1819 1819 # heads immediately after a strip/rollback. That in turn
1820 1820 # guarantees that "cachetip == currenttip" (comparing both rev
1821 1821 # and node) always means no nodes have been added or destroyed.
1822 1822
1823 1823 # XXX this is suboptimal when qrefresh'ing: we strip the current
1824 1824 # head, refresh the tag cache, then immediately add a new head.
1825 1825 # But I think doing it this way is necessary for the "instant
1826 1826 # tag cache retrieval" case to work.
1827 1827 self.invalidate()
1828 1828
1829 1829 def walk(self, match, node=None):
1830 1830 '''
1831 1831 walk recursively through the directory tree or a given
1832 1832 changeset, finding all files matched by the match
1833 1833 function
1834 1834 '''
1835 1835 return self[node].walk(match)
1836 1836
1837 1837 def status(self, node1='.', node2=None, match=None,
1838 1838 ignored=False, clean=False, unknown=False,
1839 1839 listsubrepos=False):
1840 1840 '''a convenience method that calls node1.status(node2)'''
1841 1841 return self[node1].status(node2, match, ignored, clean, unknown,
1842 1842 listsubrepos)
1843 1843
1844 1844 def heads(self, start=None):
1845 1845 heads = self.changelog.heads(start)
1846 1846 # sort the output in rev descending order
1847 1847 return sorted(heads, key=self.changelog.rev, reverse=True)
1848 1848
1849 1849 def branchheads(self, branch=None, start=None, closed=False):
1850 1850 '''return a (possibly filtered) list of heads for the given branch
1851 1851
1852 1852 Heads are returned in topological order, from newest to oldest.
1853 1853 If branch is None, use the dirstate branch.
1854 1854 If start is not None, return only heads reachable from start.
1855 1855 If closed is True, return heads that are marked as closed as well.
1856 1856 '''
1857 1857 if branch is None:
1858 1858 branch = self[None].branch()
1859 1859 branches = self.branchmap()
1860 1860 if branch not in branches:
1861 1861 return []
1862 1862 # the cache returns heads ordered lowest to highest
1863 1863 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1864 1864 if start is not None:
1865 1865 # filter out the heads that cannot be reached from startrev
1866 1866 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1867 1867 bheads = [h for h in bheads if h in fbheads]
1868 1868 return bheads
1869 1869
1870 1870 def branches(self, nodes):
1871 1871 if not nodes:
1872 1872 nodes = [self.changelog.tip()]
1873 1873 b = []
1874 1874 for n in nodes:
1875 1875 t = n
1876 1876 while True:
1877 1877 p = self.changelog.parents(n)
1878 1878 if p[1] != nullid or p[0] == nullid:
1879 1879 b.append((t, n, p[0], p[1]))
1880 1880 break
1881 1881 n = p[0]
1882 1882 return b
1883 1883
1884 1884 def between(self, pairs):
1885 1885 r = []
1886 1886
1887 1887 for top, bottom in pairs:
1888 1888 n, l, i = top, [], 0
1889 1889 f = 1
1890 1890
1891 1891 while n != bottom and n != nullid:
1892 1892 p = self.changelog.parents(n)[0]
1893 1893 if i == f:
1894 1894 l.append(n)
1895 1895 f = f * 2
1896 1896 n = p
1897 1897 i += 1
1898 1898
1899 1899 r.append(l)
1900 1900
1901 1901 return r
1902 1902
1903 1903 def checkpush(self, pushop):
1904 1904 """Extensions can override this function if additional checks have
1905 1905 to be performed before pushing, or call it if they override the push
1906 1906 command.
1907 1907 """
1908 1908 pass
1909 1909
1910 1910 @unfilteredpropertycache
1911 1911 def prepushoutgoinghooks(self):
1912 1912 """Return util.hooks consists of a pushop with repo, remote, outgoing
1913 1913 methods, which are called before pushing changesets.
1914 1914 """
1915 1915 return util.hooks()
1916 1916
1917 1917 def pushkey(self, namespace, key, old, new):
1918 1918 try:
1919 1919 tr = self.currenttransaction()
1920 1920 hookargs = {}
1921 1921 if tr is not None:
1922 1922 hookargs.update(tr.hookargs)
1923 1923 hookargs['namespace'] = namespace
1924 1924 hookargs['key'] = key
1925 1925 hookargs['old'] = old
1926 1926 hookargs['new'] = new
1927 1927 self.hook('prepushkey', throw=True, **hookargs)
1928 1928 except error.HookAbort as exc:
1929 1929 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1930 1930 if exc.hint:
1931 1931 self.ui.write_err(_("(%s)\n") % exc.hint)
1932 1932 return False
1933 1933 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1934 1934 ret = pushkey.push(self, namespace, key, old, new)
1935 1935 def runhook():
1936 1936 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1937 1937 ret=ret)
1938 1938 self._afterlock(runhook)
1939 1939 return ret
1940 1940
1941 1941 def listkeys(self, namespace):
1942 1942 self.hook('prelistkeys', throw=True, namespace=namespace)
1943 1943 self.ui.debug('listing keys for "%s"\n' % namespace)
1944 1944 values = pushkey.list(self, namespace)
1945 1945 self.hook('listkeys', namespace=namespace, values=values)
1946 1946 return values
1947 1947
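A sketch of the pushkey round trip these two methods implement, using the 'phases' namespace defined in phases.py below; `repo` and the node hex `nhex` are assumed to exist:

# List draft roots, then ask the repo to move one node to public.
keys = repo.listkeys('phases')               # {hex(root): '1', ...}
ok = repo.pushkey('phases', nhex, '1', '0')  # old=draft(1), new=public(0)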
1948 1948 def debugwireargs(self, one, two, three=None, four=None, five=None):
1949 1949 '''used to test argument passing over the wire'''
1950 1950 return "%s %s %s %s %s" % (one, two, three, four, five)
1951 1951
1952 1952 def savecommitmessage(self, text):
1953 1953 fp = self.vfs('last-message.txt', 'wb')
1954 1954 try:
1955 1955 fp.write(text)
1956 1956 finally:
1957 1957 fp.close()
1958 1958 return self.pathto(fp.name[len(self.root) + 1:])
1959 1959
1960 1960 # used to avoid circular references so destructors work
1961 1961 def aftertrans(files):
1962 1962 renamefiles = [tuple(t) for t in files]
1963 1963 def a():
1964 1964 for vfs, src, dest in renamefiles:
1965 1965 try:
1966 1966 vfs.rename(src, dest)
1967 1967 except OSError: # journal file does not yet exist
1968 1968 pass
1969 1969 return a
1970 1970
1971 1971 def undoname(fn):
1972 1972 base, name = os.path.split(fn)
1973 1973 assert name.startswith('journal')
1974 1974 return os.path.join(base, name.replace('journal', 'undo', 1))
1975 1975
1976 1976 def instance(ui, path, create):
1977 1977 return localrepository(ui, util.urllocalpath(path), create)
1978 1978
1979 1979 def islocal(path):
1980 1980 return True
1981 1981
1982 1982 def newreporequirements(repo):
1983 1983 """Determine the set of requirements for a new local repository.
1984 1984
1985 1985 Extensions can wrap this function to specify custom requirements for
1986 1986 new repositories.
1987 1987 """
1988 1988 ui = repo.ui
1989 1989 requirements = set(['revlogv1'])
1990 1990 if ui.configbool('format', 'usestore', True):
1991 1991 requirements.add('store')
1992 1992 if ui.configbool('format', 'usefncache', True):
1993 1993 requirements.add('fncache')
1994 1994 if ui.configbool('format', 'dotencode', True):
1995 1995 requirements.add('dotencode')
1996 1996
1997 1997 if scmutil.gdinitconfig(ui):
1998 1998 requirements.add('generaldelta')
1999 1999 if ui.configbool('experimental', 'treemanifest', False):
2000 2000 requirements.add('treemanifest')
2001 2001 if ui.configbool('experimental', 'manifestv2', False):
2002 2002 requirements.add('manifestv2')
2003 2003
2004 2004 return requirements
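Since extensions are expected to wrap newreporequirements, here is a sketch of doing so with the standard extensions.wrapfunction helper; the extension name, config knob, and 'exp-myext' requirement are all hypothetical:

# Hypothetical extension adding a custom requirement to new repos.
from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    requirements = orig(repo)
    if repo.ui.configbool('myext', 'enabled'):  # hypothetical config knob
        requirements.add('exp-myext')           # hypothetical requirement
    return requirements

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)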
@@ -1,481 +1,481
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase are described
22 22 below; here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset cannot be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set; we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of facts/rules defines the exchange of phases:
50 50
51 51 * old client never changes server states
52 52 * pull never changes server states
53 53 * changesets on publishing and old servers are seen as public by the client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60 server
61 61 old publish non-publish
62 62 N X N D P N D P
63 63 old client
64 64 pull
65 65 N - X/X - X/D X/P - X/D X/P
66 66 X - X/X - X/D X/P - X/D X/P
67 67 push
68 68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 69 new client
70 70 pull
71 71 N - P/X - P/D P/P - D/D P/P
72 72 D - P/X - P/D P/P - D/D P/P
73 73 P - P/X - P/D P/P - P/D P/P
74 74 push
75 75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 76 P P/X P/X P/P P/P P/P P/P P/P P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: an old client behaves as a publishing server with draft-only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
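As a concrete sketch of the concepts above, the phase of a revision can be queried through the repository's phase cache; `repo` and `rev` are assumed to exist:

# Hypothetical query: map a revision to its phase name.
phase = repo._phasecache.phase(repo, rev)  # 0, 1 or 2
name = phasenames[phase]                   # 'public', 'draft' or 'secret'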
103 103 from __future__ import absolute_import
104 104
105 105 import errno
106 import os
107 106
108 107 from .i18n import _
109 108 from .node import (
110 109 bin,
111 110 hex,
112 111 nullid,
113 112 nullrev,
114 113 short,
115 114 )
116 115 from . import (
116 encoding,
117 117 error,
118 118 )
119 119
120 120 allphases = public, draft, secret = range(3)
121 121 trackedphases = allphases[1:]
122 122 phasenames = ['public', 'draft', 'secret']
123 123
124 124 def _readroots(repo, phasedefaults=None):
125 125 """Read phase roots from disk
126 126
127 127 phasedefaults is a list of fn(repo, roots) callables, which are
128 128 executed if the phase roots file does not exist. When phases are
129 129 being initialized on an existing repository, this could be used to
130 130 set selected changesets' phase to something other than public.
131 131
132 132 Return (roots, dirty) where dirty is true if roots differ from
133 133 what is being stored.
134 134 """
135 135 repo = repo.unfiltered()
136 136 dirty = False
137 137 roots = [set() for i in allphases]
138 138 try:
139 139 f = None
140 if 'HG_PENDING' in os.environ:
140 if 'HG_PENDING' in encoding.environ:
141 141 try:
142 142 f = repo.svfs('phaseroots.pending')
143 143 except IOError as inst:
144 144 if inst.errno != errno.ENOENT:
145 145 raise
146 146 if f is None:
147 147 f = repo.svfs('phaseroots')
148 148 try:
149 149 for line in f:
150 150 phase, nh = line.split()
151 151 roots[int(phase)].add(bin(nh))
152 152 finally:
153 153 f.close()
154 154 except IOError as inst:
155 155 if inst.errno != errno.ENOENT:
156 156 raise
157 157 if phasedefaults:
158 158 for f in phasedefaults:
159 159 roots = f(repo, roots)
160 160 dirty = True
161 161 return roots, dirty
162 162
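A sketch of a phasedefaults callable as described in the docstring above (entirely hypothetical): it receives the repo and the per-phase root sets and returns the updated sets.

# Hypothetical phasedefault: when no phaseroots file exists yet,
# initialize every root of the repository as draft.
def _alldraftroots(repo, roots):
    for rev in repo.revs('roots(all())'):
        roots[draft].add(repo.changelog.node(rev))
    return roots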
163 163 class phasecache(object):
164 164 def __init__(self, repo, phasedefaults, _load=True):
165 165 if _load:
166 166 # Cheap trick to allow shallow-copy without copy module
167 167 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
168 168 self._phaserevs = None
169 169 self._phasesets = None
170 170 self.filterunknown(repo)
171 171 self.opener = repo.svfs
172 172
173 173 def copy(self):
174 174 # Shallow copy meant to ensure isolation in
175 175 # advance/retractboundary(), nothing more.
176 176 ph = self.__class__(None, None, _load=False)
177 177 ph.phaseroots = self.phaseroots[:]
178 178 ph.dirty = self.dirty
179 179 ph.opener = self.opener
180 180 ph._phaserevs = self._phaserevs
181 181 ph._phasesets = self._phasesets
182 182 return ph
183 183
184 184 def replace(self, phcache):
185 185 """replace all values in 'self' with content of phcache"""
186 186 for a in ('phaseroots', 'dirty', 'opener', '_phaserevs', '_phasesets'):
187 187 setattr(self, a, getattr(phcache, a))
188 188
189 189 def _getphaserevsnative(self, repo):
190 190 repo = repo.unfiltered()
191 191 nativeroots = []
192 192 for phase in trackedphases:
193 193 nativeroots.append(map(repo.changelog.rev, self.phaseroots[phase]))
194 194 return repo.changelog.computephases(nativeroots)
195 195
196 196 def _computephaserevspure(self, repo):
197 197 repo = repo.unfiltered()
198 198 revs = [public] * len(repo.changelog)
199 199 self._phaserevs = revs
200 200 self._populatephaseroots(repo)
201 201 for phase in trackedphases:
202 202 roots = map(repo.changelog.rev, self.phaseroots[phase])
203 203 if roots:
204 204 for rev in roots:
205 205 revs[rev] = phase
206 206 for rev in repo.changelog.descendants(roots):
207 207 revs[rev] = phase
208 208
209 209 def loadphaserevs(self, repo):
210 210 """ensure phase information is loaded in the object"""
211 211 if self._phaserevs is None:
212 212 try:
213 213 if repo.ui.configbool('experimental',
214 214 'nativephaseskillswitch'):
215 215 self._computephaserevspure(repo)
216 216 else:
217 217 res = self._getphaserevsnative(repo)
218 218 self._phaserevs, self._phasesets = res
219 219 except AttributeError:
220 220 self._computephaserevspure(repo)
221 221
222 222 def invalidate(self):
223 223 self._phaserevs = None
224 224 self._phasesets = None
225 225
226 226 def _populatephaseroots(self, repo):
227 227 """Fills the _phaserevs cache with phases for the roots.
228 228 """
229 229 cl = repo.changelog
230 230 phaserevs = self._phaserevs
231 231 for phase in trackedphases:
232 232 roots = map(cl.rev, self.phaseroots[phase])
233 233 for root in roots:
234 234 phaserevs[root] = phase
235 235
236 236 def phase(self, repo, rev):
237 237 # We need a repo argument here to be able to build _phaserevs
238 238 # if necessary. The repository instance is not stored in
239 239 # phasecache to avoid reference cycles. The changelog instance
240 240 # is not stored because it is a filecache() property and can
241 241 # be replaced without us being notified.
242 242 if rev == nullrev:
243 243 return public
244 244 if rev < nullrev:
245 245 raise ValueError(_('cannot lookup negative revision'))
246 246 if self._phaserevs is None or rev >= len(self._phaserevs):
247 247 self.invalidate()
248 248 self.loadphaserevs(repo)
249 249 return self._phaserevs[rev]
250 250
251 251 def write(self):
252 252 if not self.dirty:
253 253 return
254 254 f = self.opener('phaseroots', 'w', atomictemp=True, checkambig=True)
255 255 try:
256 256 self._write(f)
257 257 finally:
258 258 f.close()
259 259
260 260 def _write(self, fp):
261 261 for phase, roots in enumerate(self.phaseroots):
262 262 for h in roots:
263 263 fp.write('%i %s\n' % (phase, hex(h)))
264 264 self.dirty = False
265 265
266 266 def _updateroots(self, phase, newroots, tr):
267 267 self.phaseroots[phase] = newroots
268 268 self.invalidate()
269 269 self.dirty = True
270 270
271 271 tr.addfilegenerator('phase', ('phaseroots',), self._write)
272 272 tr.hookargs['phases_moved'] = '1'
273 273
274 274 def advanceboundary(self, repo, tr, targetphase, nodes):
275 275 # Be careful to preserve shallow-copied values: do not update
276 276 # phaseroots values, replace them.
277 277
278 278 repo = repo.unfiltered()
279 279 delroots = [] # set of root deleted by this path
280 280 for phase in xrange(targetphase + 1, len(allphases)):
281 281 # filter nodes that are not in a compatible phase already
282 282 nodes = [n for n in nodes
283 283 if self.phase(repo, repo[n].rev()) >= phase]
284 284 if not nodes:
285 285 break # no roots to move anymore
286 286 olds = self.phaseroots[phase]
287 287 roots = set(ctx.node() for ctx in repo.set(
288 288 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
289 289 if olds != roots:
290 290 self._updateroots(phase, roots, tr)
291 291 # some roots may need to be declared for lower phases
292 292 delroots.extend(olds - roots)
293 293 # declare deleted root in the target phase
294 294 if targetphase != 0:
295 295 self.retractboundary(repo, tr, targetphase, delroots)
296 296 repo.invalidatevolatilesets()
297 297
298 298 def retractboundary(self, repo, tr, targetphase, nodes):
299 299 # Be careful to preserve shallow-copied values: do not update
300 300 # phaseroots values, replace them.
301 301
302 302 repo = repo.unfiltered()
303 303 currentroots = self.phaseroots[targetphase]
304 304 newroots = [n for n in nodes
305 305 if self.phase(repo, repo[n].rev()) < targetphase]
306 306 if newroots:
307 307 if nullid in newroots:
308 308 raise error.Abort(_('cannot change null revision phase'))
309 309 currentroots = currentroots.copy()
310 310 currentroots.update(newroots)
311 311
312 312 # Only compute new roots for revs above the roots that are being
313 313 # retracted.
314 314 minnewroot = min(repo[n].rev() for n in newroots)
315 315 aboveroots = [n for n in currentroots
316 316 if repo[n].rev() >= minnewroot]
317 317 updatedroots = repo.set('roots(%ln::)', aboveroots)
318 318
319 319 finalroots = set(n for n in currentroots if repo[n].rev() <
320 320 minnewroot)
321 321 finalroots.update(ctx.node() for ctx in updatedroots)
322 322
323 323 self._updateroots(targetphase, finalroots, tr)
324 324 repo.invalidatevolatilesets()
325 325
326 326 def filterunknown(self, repo):
327 327 """remove unknown nodes from the phase boundary
328 328
329 329 Nothing is lost as unknown nodes only hold data for their descendants.
330 330 """
331 331 filtered = False
332 332 nodemap = repo.changelog.nodemap # to filter unknown nodes
333 333 for phase, nodes in enumerate(self.phaseroots):
334 334 missing = sorted(node for node in nodes if node not in nodemap)
335 335 if missing:
336 336 for mnode in missing:
337 337 repo.ui.debug(
338 338 'removing unknown node %s from %i-phase boundary\n'
339 339 % (short(mnode), phase))
340 340 nodes.symmetric_difference_update(missing)
341 341 filtered = True
342 342 if filtered:
343 343 self.dirty = True
344 344 # filterunknown is called by repo.destroyed; we may have no changes in
345 345 # roots, but the phaserevs contents are certainly invalid (or at least
346 346 # we have no proper way to check that). Related to issue 3858.
347 347 #
348 348 # The other caller is __init__, which has no _phaserevs initialized
349 349 # anyway. If this changes, we should consider adding a dedicated
350 350 # "destroyed" function to phasecache or a proper cache key mechanism
351 351 # (see branchmap one)
352 352 self.invalidate()
353 353
354 354 def advanceboundary(repo, tr, targetphase, nodes):
355 355 """Add nodes to a phase changing other nodes phases if necessary.
356 356
357 357 This function move boundary *forward* this means that all nodes
358 358 are set in the target phase or kept in a *lower* phase.
359 359
360 360 Simplify boundary to contains phase roots only."""
361 361 phcache = repo._phasecache.copy()
362 362 phcache.advanceboundary(repo, tr, targetphase, nodes)
363 363 repo._phasecache.replace(phcache)
364 364
365 365 def retractboundary(repo, tr, targetphase, nodes):
366 366 """Set nodes back to a phase changing other nodes phases if
367 367 necessary.
368 368
369 369 This function move boundary *backward* this means that all nodes
370 370 are set in the target phase or kept in a *higher* phase.
371 371
372 372 Simplify boundary to contains phase roots only."""
373 373 phcache = repo._phasecache.copy()
374 374 phcache.retractboundary(repo, tr, targetphase, nodes)
375 375 repo._phasecache.replace(phcache)
376 376
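A usage sketch combining these helpers with the locking and transaction pattern seen in pushphase() below; `repo` and `node` are assumed to exist:

# Hypothetical: publish a single node inside a transaction.
with repo.lock():
    with repo.transaction('publish') as tr:
        advanceboundary(repo, tr, public, [node])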
377 377 def listphases(repo):
378 378 """List phases root for serialization over pushkey"""
379 379 keys = {}
380 380 value = '%i' % draft
381 381 for root in repo._phasecache.phaseroots[draft]:
382 382 keys[hex(root)] = value
383 383
384 384 if repo.publishing():
385 385 # Add an extra data entry to let the remote know we are a publishing
386 386 # repo. A publishing repo can't just pretend it is an old repo.
387 387 # When pushing to a publishing repo, the client still needs to
388 388 # push the phase boundary.
389 389 #
390 390 # A push does not only push changesets. It also pushes phase data.
391 391 # New phase data may apply to common changesets which won't be
392 392 # pushed (as they are common). Here is a very simple example:
393 393 #
394 394 # 1) repo A pushes changeset X as draft to repo B
395 395 # 2) repo B makes changeset X public
396 396 # 3) repo B pushes to repo A. X is not pushed, but the data that
397 397 # X is now public should be
398 398 #
399 399 # The server can't handle it on its own as it has no idea of the
400 400 # client's phase data.
401 401 keys['publishing'] = 'True'
402 402 return keys
403 403
404 404 def pushphase(repo, nhex, oldphasestr, newphasestr):
405 405 """List phases root for serialization over pushkey"""
406 406 repo = repo.unfiltered()
407 407 with repo.lock():
408 408 currentphase = repo[nhex].phase()
409 409 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
410 410 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
411 411 if currentphase == oldphase and newphase < oldphase:
412 412 with repo.transaction('pushkey-phase') as tr:
413 413 advanceboundary(repo, tr, newphase, [bin(nhex)])
414 414 return 1
415 415 elif currentphase == newphase:
416 416 # raced, but got correct result
417 417 return 1
418 418 else:
419 419 return 0
420 420
421 421 def analyzeremotephases(repo, subset, roots):
422 422 """Compute phases heads and root in a subset of node from root dict
423 423
424 424 * subset is heads of the subset
425 425 * roots is {<nodeid> => phase} mapping. key and value are string.
426 426
427 427 Accept unknown element input
428 428 """
429 429 repo = repo.unfiltered()
430 430 # build list from dictionary
431 431 draftroots = []
432 432 nodemap = repo.changelog.nodemap # to filter unknown nodes
433 433 for nhex, phase in roots.iteritems():
434 434 if nhex == 'publishing': # ignore data related to publish option
435 435 continue
436 436 node = bin(nhex)
437 437 phase = int(phase)
438 438 if phase == public:
439 439 if node != nullid:
440 440 repo.ui.warn(_('ignoring inconsistent public root'
441 441 ' from remote: %s\n') % nhex)
442 442 elif phase == draft:
443 443 if node in nodemap:
444 444 draftroots.append(node)
445 445 else:
446 446 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
447 447 % (phase, nhex))
448 448 # compute heads
449 449 publicheads = newheads(repo, subset, draftroots)
450 450 return publicheads, draftroots
451 451
452 452 def newheads(repo, heads, roots):
453 453 """compute new head of a subset minus another
454 454
455 455 * `heads`: define the first subset
456 456 * `roots`: define the second we subtract from the first"""
457 457 repo = repo.unfiltered()
458 458 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
459 459 heads, roots, roots, heads)
460 460 return [c.node() for c in revset]
461 461
462 462
463 463 def newcommitphase(ui):
464 464 """helper to get the target phase of new commit
465 465
466 466 Handle all possible values for the phases.new-commit options.
467 467
468 468 """
469 469 v = ui.config('phases', 'new-commit', draft)
470 470 try:
471 471 return phasenames.index(v)
472 472 except ValueError:
473 473 try:
474 474 return int(v)
475 475 except ValueError:
476 476 msg = _("phases.new-commit: not a valid phase name ('%s')")
477 477 raise error.ConfigError(msg % v)
478 478
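A sketch of resolving the configured target phase; the config key is phases.new-commit and the default is draft (`repo` assumed in scope):

# Resolve the target phase for a new commit; with
# "[phases] new-commit = secret" in hgrc this returns secret (2).
target = newcommitphase(repo.ui)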
479 479 def hassecret(repo):
480 480 """utility function that check if a repo have any secret changeset."""
481 481 return bool(repo._phasecache.phaseroots[2])
@@ -1,652 +1,652
1 1 # posix.py - Posix utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import fcntl
12 12 import getpass
13 13 import grp
14 14 import os
15 15 import pwd
16 16 import re
17 17 import select
18 18 import stat
19 19 import sys
20 20 import tempfile
21 21 import unicodedata
22 22
23 23 from .i18n import _
24 24 from . import (
25 25 encoding,
26 26 pycompat,
27 27 )
28 28
29 29 posixfile = open
30 30 normpath = os.path.normpath
31 31 samestat = os.path.samestat
32 32 try:
33 33 oslink = os.link
34 34 except AttributeError:
35 35 # Some platforms build Python without os.link on systems that are
36 36 # vaguely unix-like but don't have hardlink support. For those
37 37 # poor souls, just say we tried and that it failed so we fall back
38 38 # to copies.
39 39 def oslink(src, dst):
40 40 raise OSError(errno.EINVAL,
41 41 'hardlinks not supported: %s to %s' % (src, dst))
42 42 unlink = os.unlink
43 43 rename = os.rename
44 44 removedirs = os.removedirs
45 45 expandglobs = False
46 46
47 47 umask = os.umask(0)
48 48 os.umask(umask)
49 49
50 50 def split(p):
51 51 '''Same as posixpath.split, but faster
52 52
53 53 >>> import posixpath
54 54 >>> for f in ['/absolute/path/to/file',
55 55 ... 'relative/path/to/file',
56 56 ... 'file_alone',
57 57 ... 'path/to/directory/',
58 58 ... '/multiple/path//separators',
59 59 ... '/file_at_root',
60 60 ... '///multiple_leading_separators_at_root',
61 61 ... '']:
62 62 ... assert split(f) == posixpath.split(f), f
63 63 '''
64 64 ht = p.rsplit('/', 1)
65 65 if len(ht) == 1:
66 66 return '', p
67 67 nh = ht[0].rstrip('/')
68 68 if nh:
69 69 return nh, ht[1]
70 70 return ht[0] + '/', ht[1]
71 71
72 72 def openhardlinks():
73 73 '''return true if it is safe to hold open file handles to hardlinks'''
74 74 return True
75 75
76 76 def nlinks(name):
77 77 '''return number of hardlinks for the given file'''
78 78 return os.lstat(name).st_nlink
79 79
80 80 def parsepatchoutput(output_line):
81 81 """parses the output produced by patch and returns the filename"""
82 82 pf = output_line[14:]
83 83 if os.sys.platform == 'OpenVMS':
84 84 if pf[0] == '`':
85 85 pf = pf[1:-1] # Remove the quotes
86 86 else:
87 87 if pf.startswith("'") and pf.endswith("'") and " " in pf:
88 88 pf = pf[1:-1] # Remove the quotes
89 89 return pf
90 90
91 91 def sshargs(sshcmd, host, user, port):
92 92 '''Build argument list for ssh'''
93 93 args = user and ("%s@%s" % (user, host)) or host
94 94 return port and ("%s -p %s" % (args, port)) or args
95 95
96 96 def isexec(f):
97 97 """check whether a file is executable"""
98 98 return (os.lstat(f).st_mode & 0o100 != 0)
99 99
100 100 def setflags(f, l, x):
101 101 s = os.lstat(f).st_mode
102 102 if l:
103 103 if not stat.S_ISLNK(s):
104 104 # switch file to link
105 105 fp = open(f)
106 106 data = fp.read()
107 107 fp.close()
108 108 os.unlink(f)
109 109 try:
110 110 os.symlink(data, f)
111 111 except OSError:
112 112 # failed to make a link, rewrite file
113 113 fp = open(f, "w")
114 114 fp.write(data)
115 115 fp.close()
116 116 # no chmod needed at this point
117 117 return
118 118 if stat.S_ISLNK(s):
119 119 # switch link to file
120 120 data = os.readlink(f)
121 121 os.unlink(f)
122 122 fp = open(f, "w")
123 123 fp.write(data)
124 124 fp.close()
125 125 s = 0o666 & ~umask # avoid restatting for chmod
126 126
127 127 sx = s & 0o100
128 128 if x and not sx:
129 129 # Turn on +x for every +r bit when making a file executable
130 130 # and obey umask.
131 131 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
132 132 elif not x and sx:
133 133 # Turn off all +x bits
134 134 os.chmod(f, s & 0o666)
135 135
136 136 def copymode(src, dst, mode=None):
137 137 '''Copy the file mode from the file at path src to dst.
138 138 If src doesn't exist, we're using mode instead. If mode is None, we're
139 139 using umask.'''
140 140 try:
141 141 st_mode = os.lstat(src).st_mode & 0o777
142 142 except OSError as inst:
143 143 if inst.errno != errno.ENOENT:
144 144 raise
145 145 st_mode = mode
146 146 if st_mode is None:
147 147 st_mode = ~umask
148 148 st_mode &= 0o666
149 149 os.chmod(dst, st_mode)
150 150
151 151 def checkexec(path):
152 152 """
153 153 Check whether the given path is on a filesystem with UNIX-like exec flags
154 154
155 155 Requires a directory (like /foo/.hg)
156 156 """
157 157
158 158 # VFAT on some Linux versions can flip mode but it doesn't persist
159 159 # across a FS remount. Frequently we can detect it if files are created
160 160 # with exec bit on.
161 161
162 162 try:
163 163 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
164 164 cachedir = os.path.join(path, '.hg', 'cache')
165 165 if os.path.isdir(cachedir):
166 166 checkisexec = os.path.join(cachedir, 'checkisexec')
167 167 checknoexec = os.path.join(cachedir, 'checknoexec')
168 168
169 169 try:
170 170 m = os.stat(checkisexec).st_mode
171 171 except OSError as e:
172 172 if e.errno != errno.ENOENT:
173 173 raise
174 174 # checkisexec does not exist - fall through ...
175 175 else:
176 176 # checkisexec exists, check if it actually is exec
177 177 if m & EXECFLAGS != 0:
178 178 # ensure checkisexec exists, check it isn't exec
179 179 try:
180 180 m = os.stat(checknoexec).st_mode
181 181 except OSError as e:
182 182 if e.errno != errno.ENOENT:
183 183 raise
184 184 file(checknoexec, 'w').close() # might fail
185 185 m = os.stat(checknoexec).st_mode
186 186 if m & EXECFLAGS == 0:
187 187 # check-exec is exec and check-no-exec is not exec
188 188 return True
189 189 # checknoexec exists but is exec - delete it
190 190 os.unlink(checknoexec)
191 191 # checkisexec exists but is not exec - delete it
192 192 os.unlink(checkisexec)
193 193
194 194 # check using one file, leave it as checkisexec
195 195 checkdir = cachedir
196 196 else:
197 197 # check directly in path and don't leave checkisexec behind
198 198 checkdir = path
199 199 checkisexec = None
200 200 fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-')
201 201 try:
202 202 os.close(fh)
203 203 m = os.stat(fn).st_mode
204 204 if m & EXECFLAGS == 0:
205 205 os.chmod(fn, m & 0o777 | EXECFLAGS)
206 206 if os.stat(fn).st_mode & EXECFLAGS != 0:
207 207 if checkisexec is not None:
208 208 os.rename(fn, checkisexec)
209 209 fn = None
210 210 return True
211 211 finally:
212 212 if fn is not None:
213 213 os.unlink(fn)
214 214 except (IOError, OSError):
215 215 # we don't care, the user probably won't be able to commit anyway
216 216 return False
217 217
218 218 def checklink(path):
219 219 """check whether the given path is on a symlink-capable filesystem"""
220 220 # mktemp is not racy because symlink creation will fail if the
221 221 # file already exists
222 222 while True:
223 223 cachedir = os.path.join(path, '.hg', 'cache')
224 224 checklink = os.path.join(cachedir, 'checklink')
225 225 # try fast path, read only
226 226 if os.path.islink(checklink):
227 227 return True
228 228 if os.path.isdir(cachedir):
229 229 checkdir = cachedir
230 230 else:
231 231 checkdir = path
232 232 cachedir = None
233 233 name = tempfile.mktemp(dir=checkdir, prefix='checklink-')
234 234 try:
235 235 fd = None
236 236 if cachedir is None:
237 237 fd = tempfile.NamedTemporaryFile(dir=checkdir,
238 238 prefix='hg-checklink-')
239 239 target = os.path.basename(fd.name)
240 240 else:
241 241 # create a fixed file to link to; doesn't matter if it
242 242 # already exists.
243 243 target = 'checklink-target'
244 244 open(os.path.join(cachedir, target), 'w').close()
245 245 try:
246 246 os.symlink(target, name)
247 247 if cachedir is None:
248 248 os.unlink(name)
249 249 else:
250 250 try:
251 251 os.rename(name, checklink)
252 252 except OSError:
253 253 os.unlink(name)
254 254 return True
255 255 except OSError as inst:
256 256 # link creation might race, try again
257 257 if inst[0] == errno.EEXIST:
258 258 continue
259 259 raise
260 260 finally:
261 261 if fd is not None:
262 262 fd.close()
263 263 except AttributeError:
264 264 return False
265 265 except OSError as inst:
266 266 # sshfs might report failure while successfully creating the link
267 267 if inst[0] == errno.EIO and os.path.exists(name):
268 268 os.unlink(name)
269 269 return False
270 270
271 271 def checkosfilename(path):
272 272 '''Check that the base-relative path is a valid filename on this platform.
273 273 Returns None if the path is ok, or a UI string describing the problem.'''
274 274 pass # on posix platforms, every path is ok
275 275
276 276 def setbinary(fd):
277 277 pass
278 278
279 279 def pconvert(path):
280 280 return path
281 281
282 282 def localpath(path):
283 283 return path
284 284
285 285 def samefile(fpath1, fpath2):
286 286 """Returns whether path1 and path2 refer to the same file. This is only
287 287 guaranteed to work for files, not directories."""
288 288 return os.path.samefile(fpath1, fpath2)
289 289
290 290 def samedevice(fpath1, fpath2):
291 291 """Returns whether fpath1 and fpath2 are on the same device. This is only
292 292 guaranteed to work for files, not directories."""
293 293 st1 = os.lstat(fpath1)
294 294 st2 = os.lstat(fpath2)
295 295 return st1.st_dev == st2.st_dev
296 296
297 297 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
298 298 def normcase(path):
299 299 return path.lower()
300 300
301 301 # what normcase does to ASCII strings
302 302 normcasespec = encoding.normcasespecs.lower
303 303 # fallback normcase function for non-ASCII strings
304 304 normcasefallback = normcase
305 305
306 306 if sys.platform == 'darwin':
307 307
308 308 def normcase(path):
309 309 '''
310 310 Normalize a filename for OS X-compatible comparison:
311 311 - escape-encode invalid characters
312 312 - decompose to NFD
313 313 - lowercase
314 314 - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]
315 315
316 316 >>> normcase('UPPER')
317 317 'upper'
318 318 >>> normcase('Caf\xc3\xa9')
319 319 'cafe\\xcc\\x81'
320 320 >>> normcase('\xc3\x89')
321 321 'e\\xcc\\x81'
322 322 >>> normcase('\xb8\xca\xc3\xca\xbe\xc8.JPG') # issue3918
323 323 '%b8%ca%c3\\xca\\xbe%c8.jpg'
324 324 '''
325 325
326 326 try:
327 327 return encoding.asciilower(path) # exception for non-ASCII
328 328 except UnicodeDecodeError:
329 329 return normcasefallback(path)
330 330
331 331 normcasespec = encoding.normcasespecs.lower
332 332
333 333 def normcasefallback(path):
334 334 try:
335 335 u = path.decode('utf-8')
336 336 except UnicodeDecodeError:
337 337 # OS X percent-encodes any bytes that aren't valid utf-8
338 338 s = ''
339 339 pos = 0
340 340 l = len(path)
341 341 while pos < l:
342 342 try:
343 343 c = encoding.getutf8char(path, pos)
344 344 pos += len(c)
345 345 except ValueError:
346 346 c = '%%%02X' % ord(path[pos])
347 347 pos += 1
348 348 s += c
349 349
350 350 u = s.decode('utf-8')
351 351
352 352 # Decompose then lowercase (HFS+ technote specifies lower)
353 353 enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
354 354 # drop HFS+ ignored characters
355 355 return encoding.hfsignoreclean(enc)
356 356
357 357 if sys.platform == 'cygwin':
358 358 # workaround for cygwin, in which mount point part of path is
359 359 # treated as case sensitive, even though underlying NTFS is case
360 360 # insensitive.
361 361
362 362 # default mount points
363 363 cygwinmountpoints = sorted([
364 364 "/usr/bin",
365 365 "/usr/lib",
366 366 "/cygdrive",
367 367 ], reverse=True)
368 368
369 369 # use upper-ing as normcase as same as NTFS workaround
370 370 def normcase(path):
371 371 pathlen = len(path)
372 372 if (pathlen == 0) or (path[0] != pycompat.ossep):
373 373 # treat as relative
374 374 return encoding.upper(path)
375 375
376 376 # to preserve case of mountpoint part
377 377 for mp in cygwinmountpoints:
378 378 if not path.startswith(mp):
379 379 continue
380 380
381 381 mplen = len(mp)
382 382 if mplen == pathlen: # mount point itself
383 383 return mp
384 384 if path[mplen] == pycompat.ossep:
385 385 return mp + encoding.upper(path[mplen:])
386 386
387 387 return encoding.upper(path)
388 388
389 389 normcasespec = encoding.normcasespecs.other
390 390 normcasefallback = normcase
391 391
392 392 # Cygwin translates native ACLs to POSIX permissions,
393 393 # but these translations are not supported by native
394 394 # tools, so the exec bit tends to be set erroneously.
395 395 # Therefore, disable executable bit access on Cygwin.
396 396 def checkexec(path):
397 397 return False
398 398
399 399 # Similarly, Cygwin's symlink emulation is likely to create
400 400 # problems when Mercurial is used from both Cygwin and native
401 401 # Windows, with other native tools, or on shared volumes
402 402 def checklink(path):
403 403 return False
404 404
405 405 _needsshellquote = None
406 406 def shellquote(s):
407 407 if os.sys.platform == 'OpenVMS':
408 408 return '"%s"' % s
409 409 global _needsshellquote
410 410 if _needsshellquote is None:
411 411 _needsshellquote = re.compile(r'[^a-zA-Z0-9._/+-]').search
412 412 if s and not _needsshellquote(s):
413 413 # "s" shouldn't have to be quoted
414 414 return s
415 415 else:
416 416 return "'%s'" % s.replace("'", "'\\''")
417 417
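A quick sketch of the quoting behaviour (example values hypothetical):

# Safe shell arguments: plain tokens pass through, anything else is
# single-quoted with embedded quotes escaped.
cmd = 'grep %s %s' % (shellquote("it's"), shellquote('file name'))
# -> grep 'it'\''s' 'file name'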
418 418 def quotecommand(cmd):
419 419 return cmd
420 420
421 421 def popen(command, mode='r'):
422 422 return os.popen(command, mode)
423 423
424 424 def testpid(pid):
425 425 '''return False if pid dead, True if running or not sure'''
426 426 if os.sys.platform == 'OpenVMS':
427 427 return True
428 428 try:
429 429 os.kill(pid, 0)
430 430 return True
431 431 except OSError as inst:
432 432 return inst.errno != errno.ESRCH
433 433
434 434 def explainexit(code):
435 435 """return a 2-tuple (desc, code) describing a subprocess status
436 436 (codes from kill are negative - not os.system/wait encoding)"""
437 437 if code >= 0:
438 438 return _("exited with status %d") % code, code
439 439 return _("killed by signal %d") % -code, -code
440 440
441 441 def isowner(st):
442 442 """Return True if the stat object st is from the current user."""
443 443 return st.st_uid == os.getuid()
444 444
445 445 def findexe(command):
446 446 '''Find executable for command searching like which does.
447 447 If command is a basename then PATH is searched for command.
448 448 PATH isn't searched if command is an absolute or relative path.
449 449 If command isn't found None is returned.'''
450 450 if sys.platform == 'OpenVMS':
451 451 return command
452 452
453 453 def findexisting(executable):
454 454 'Will return executable if existing file'
455 455 if os.path.isfile(executable) and os.access(executable, os.X_OK):
456 456 return executable
457 457 return None
458 458
459 459 if pycompat.ossep in command:
460 460 return findexisting(command)
461 461
462 462 if sys.platform == 'plan9':
463 463 return findexisting(os.path.join('/bin', command))
464 464
465 for path in os.environ.get('PATH', '').split(pycompat.ospathsep):
465 for path in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
466 466 executable = findexisting(os.path.join(path, command))
467 467 if executable is not None:
468 468 return executable
469 469 return None
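
The one functional change in this hunk swaps os.environ for encoding.environ so the PATH lookup keeps operating on bytes under Python 3, where os.environ is unicode-keyed. Outside Mercurial, the plain-stdlib equivalent of this loop looks roughly like the sketch below (a hypothetical helper, not the project's code; Python 3.3+ also ships shutil.which):

    import os

    def which(command, access=os.X_OK):
        # search each PATH entry for an executable regular file
        for path in os.environ.get('PATH', '').split(os.pathsep):
            candidate = os.path.join(path, command)
            if os.path.isfile(candidate) and os.access(candidate, access):
                return candidate
        return None

    print(which('sh'))   # e.g. /bin/sh on most POSIX systems
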
470 470
471 471 def setsignalhandler():
472 472 pass
473 473
474 474 _wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
475 475
476 476 def statfiles(files):
477 477 '''Stat each file in files. Yield each stat, or None if a file does not
478 478 exist or has a type we don't care about.'''
479 479 lstat = os.lstat
480 480 getkind = stat.S_IFMT
481 481 for nf in files:
482 482 try:
483 483 st = lstat(nf)
484 484 if getkind(st.st_mode) not in _wantedkinds:
485 485 st = None
486 486 except OSError as err:
487 487 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
488 488 raise
489 489 st = None
490 490 yield st
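
A usage sketch for statfiles() as defined above: results come back in input order, with None both for missing paths and for kinds other than regular files and symlinks (so a character device such as /dev/null is filtered out; the sample paths are arbitrary):

    paths = ['setup.py', 'no-such-file', '/dev/null']
    for name, st in zip(paths, statfiles(paths)):
        if st is None:
            print('%s: skipped' % name)
        else:
            print('%s: %d bytes' % (name, st.st_size))
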
491 491
492 492 def getuser():
493 493 '''return name of current user'''
494 494 return getpass.getuser()
495 495
496 496 def username(uid=None):
497 497 """Return the name of the user with the given uid.
498 498
499 499 If uid is None, return the name of the current user."""
500 500
501 501 if uid is None:
502 502 uid = os.getuid()
503 503 try:
504 504 return pwd.getpwuid(uid)[0]
505 505 except KeyError:
506 506 return str(uid)
507 507
508 508 def groupname(gid=None):
509 509 """Return the name of the group with the given gid.
510 510
511 511 If gid is None, return the name of the current group."""
512 512
513 513 if gid is None:
514 514 gid = os.getgid()
515 515 try:
516 516 return grp.getgrgid(gid)[0]
517 517 except KeyError:
518 518 return str(gid)
519 519
520 520 def groupmembers(name):
521 521 """Return the list of members of the group with the given
522 522 name, KeyError if the group does not exist.
523 523 """
524 524 return list(grp.getgrnam(name).gr_mem)
525 525
526 526 def spawndetached(args):
527 527 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
528 528 args[0], args)
529 529
530 530 def gethgcmd():
531 531 return sys.argv[:1]
532 532
533 533 def makedir(path, notindexed):
534 534 os.mkdir(path)
535 535
536 536 def unlinkpath(f, ignoremissing=False):
537 537 """unlink and remove the directory if it is empty"""
538 538 try:
539 539 os.unlink(f)
540 540 except OSError as e:
541 541 if not (ignoremissing and e.errno == errno.ENOENT):
542 542 raise
543 543 # try removing directories that might now be empty
544 544 try:
545 545 os.removedirs(os.path.dirname(f))
546 546 except OSError:
547 547 pass
548 548
549 549 def lookupreg(key, name=None, scope=None):
550 550 return None
551 551
552 552 def hidewindow():
553 553 """Hide current shell window.
554 554
555 555 Used to hide the window opened when starting asynchronous
556 556 child process under Windows, unneeded on other systems.
557 557 """
558 558 pass
559 559
560 560 class cachestat(object):
561 561 def __init__(self, path):
562 562 self.stat = os.stat(path)
563 563
564 564 def cacheable(self):
565 565 return bool(self.stat.st_ino)
566 566
567 567 __hash__ = object.__hash__
568 568
569 569 def __eq__(self, other):
570 570 try:
571 571 # Only dev, ino, size, mtime and atime are likely to change. Out
572 572 # of these, we shouldn't compare atime but should compare the
573 573 # rest. However, one of the other fields changing indicates
574 574 # something fishy going on, so return False if anything but atime
575 575 # changes.
576 576 return (self.stat.st_mode == other.stat.st_mode and
577 577 self.stat.st_ino == other.stat.st_ino and
578 578 self.stat.st_dev == other.stat.st_dev and
579 579 self.stat.st_nlink == other.stat.st_nlink and
580 580 self.stat.st_uid == other.stat.st_uid and
581 581 self.stat.st_gid == other.stat.st_gid and
582 582 self.stat.st_size == other.stat.st_size and
583 583 self.stat.st_mtime == other.stat.st_mtime and
584 584 self.stat.st_ctime == other.stat.st_ctime)
585 585 except AttributeError:
586 586 return False
587 587
588 588 def __ne__(self, other):
589 589 return not self == other
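
Because __eq__ above deliberately skips atime, two cachestat snapshots compare equal across a mere read but unequal once the file is rewritten (new inode, size, or mtime). A usage sketch, assuming /etc/hosts exists as on a typical POSIX box:

    before = cachestat('/etc/hosts')
    open('/etc/hosts').read()              # reading only bumps atime
    assert before == cachestat('/etc/hosts')
    # after a rewrite, the two snapshots would compare unequal
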
590 590
591 591 def executablepath():
592 592 return None # available on Windows only
593 593
594 594 def statislink(st):
595 595 '''check whether a stat result is a symlink'''
596 596 return st and stat.S_ISLNK(st.st_mode)
597 597
598 598 def statisexec(st):
599 599 '''check whether a stat result is an executable file'''
600 600 return st and (st.st_mode & 0o100 != 0)
601 601
602 602 def poll(fds):
603 603 """block until something happens on any file descriptor
604 604
605 605 This is a generic helper that will check for any activity
606 606 (read, write, exception) and return the list of touched files.
607 607
608 608 In unsupported cases, it will raise a NotImplementedError"""
609 609 try:
610 610 res = select.select(fds, fds, fds)
611 611 except ValueError: # out of range file descriptor
612 612 raise NotImplementedError()
613 613 return sorted(list(set(sum(res, []))))
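
A quick sketch with an os.pipe() pair: the read end has no data yet and the write end is immediately writable, so only the write fd comes back at first.

    import os
    r, w = os.pipe()
    assert poll([r, w]) == [w]   # w is writable; r not yet readable
    os.write(w, b'x')
    assert r in poll([r, w])     # now r is readable too
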
614 614
615 615 def readpipe(pipe):
616 616 """Read all available data from a pipe."""
617 617 # We can't fstat() a pipe because Linux will always report 0.
618 618 # So, we set the pipe to non-blocking mode and read everything
619 619 # that's available.
620 620 oldflags = fcntl.fcntl(pipe, fcntl.F_GETFL)
621 621 flags = oldflags | os.O_NONBLOCK
622 622 fcntl.fcntl(pipe, fcntl.F_SETFL, flags)
623 623
624 624 try:
625 625 chunks = []
626 626 while True:
627 627 try:
628 628 s = pipe.read()
629 629 if not s:
630 630 break
631 631 chunks.append(s)
632 632 except IOError:
633 633 break
634 634
635 635 return ''.join(chunks)
636 636 finally:
637 637 fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
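
A usage sketch (Python 2, matching the ''.join above; under Python 3 the chunks would be bytes): drain whatever the child has already written without blocking for EOF.

    import subprocess
    p = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
    p.wait()                    # data now sits buffered in the pipe
    print(readpipe(p.stdout))   # -> 'hello\n'
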
638 638
639 639 def bindunixsocket(sock, path):
640 640 """Bind the UNIX domain socket to the specified path"""
641 641 # use relative path instead of full path at bind() if possible, since
642 642 # AF_UNIX path has very small length limit (107 chars) on common
643 643 # platforms (see sys/un.h)
644 644 dirname, basename = os.path.split(path)
645 645 bakwdfd = None
646 646 if dirname:
647 647 bakwdfd = os.open('.', os.O_DIRECTORY)
648 648 os.chdir(dirname)
649 649 sock.bind(basename)
650 650 if bakwdfd:
651 651 os.fchdir(bakwdfd)
652 652 os.close(bakwdfd)
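
A usage sketch for the helper: create the socket, bind through bindunixsocket (which dodges the ~107-byte sun_path limit by binding relative to the socket's directory), then listen. The temp-dir path is illustrative only.

    import os
    import socket
    import tempfile

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    path = os.path.join(tempfile.mkdtemp(), 'daemon.sock')
    bindunixsocket(sock, path)
    sock.listen(1)
    print(sock.getsockname())   # relative name, e.g. 'daemon.sock'
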