context: add workingcommitctx for exact context to be committed...
FUJIWARA Katsunori
r23710:745e3b48 default
@@ -1,1796 +1,1808 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
26 26 """return the first ancestor of <srcrev> introducting <fnode>
27 27
28 28 If the linkrev of the file revision does not point to an ancestor of
29 29 srcrev, we'll walk down the ancestors until we find one introducing this
30 30 file revision.
31 31
32 32 :repo: a localrepository object (used to access changelog and manifest)
33 33 :path: the file path
34 34 :fnode: the nodeid of the file revision
35 35 :filelog: the filelog of this path
36 36 :srcrev: the changeset revision we search ancestors from
37 37 :inclusive: if true, the src revision will also be checked
38 38 """
39 39 cl = repo.unfiltered().changelog
40 40 ma = repo.manifest
41 41 # fetch the linkrev
42 42 fr = filelog.rev(fnode)
43 43 lkr = filelog.linkrev(fr)
44 44 # check if this linkrev is an ancestor of srcrev
45 45 anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
46 46 if lkr not in anc:
47 47 for a in anc:
48 48 ac = cl.read(a) # get changeset data (we avoid object creation).
49 49 if path in ac[3]: # checking the 'files' field.
50 50 # The file has been touched, check if the content is similar
51 51 # to the one we search for.
52 52 if fnode == ma.readdelta(ac[0]).get(path):
53 53 return a
54 54 # In theory, we should never get out of that loop without a result. But
55 55 # if the manifest uses a buggy file revision (not a child of the one it
56 56 # replaces) we could. Such a buggy situation will likely result in a crash
57 57 # somewhere else at some point.
58 58 return lkr
59 59
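The adjustment above only matters when a linkrev points outside the ancestry of srcrev ("linkrev shadowing"). As a hedged illustration, here is a minimal self-contained sketch of the same walk using plain dicts in place of Mercurial's changelog and manifest objects (all names below are illustrative, not part of the API):

    # Toy model: walk the ancestors of srcrev until one is found whose
    # manifest records the wanted filenode for this path.
    def adjust_linkrev_sketch(ancestors, manifests, path, fnode, linkrev):
        # ancestors: revs reachable from srcrev, newest first
        # manifests: rev -> {path: filenode} for revs that touched files
        if linkrev in ancestors:
            return linkrev      # fast path: linkrev already an ancestor
        for rev in ancestors:
            if manifests.get(rev, {}).get(path) == fnode:
                return rev      # first ancestor introducing this fnode
        return linkrev          # buggy-history fallback, as in the real code

    # linkrev 5 is not an ancestor of 4, but rev 3 introduced the same node:
    assert adjust_linkrev_sketch([4, 3, 2, 1, 0],
                                 {3: {'a.txt': 'n1'}, 5: {'a.txt': 'n1'}},
                                 'a.txt', 'n1', 5) == 3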
60 60 class basectx(object):
61 61 """A basectx object represents the common logic for its children:
62 62 changectx: read-only context that is already present in the repo,
63 63 workingctx: a context that represents the working directory and can
64 64 be committed,
65 65 memctx: a context that represents changes in-memory and can also
66 66 be committed."""
67 67 def __new__(cls, repo, changeid='', *args, **kwargs):
68 68 if isinstance(changeid, basectx):
69 69 return changeid
70 70
71 71 o = super(basectx, cls).__new__(cls)
72 72
73 73 o._repo = repo
74 74 o._rev = nullrev
75 75 o._node = nullid
76 76
77 77 return o
78 78
79 79 def __str__(self):
80 80 return short(self.node())
81 81
82 82 def __int__(self):
83 83 return self.rev()
84 84
85 85 def __repr__(self):
86 86 return "<%s %s>" % (type(self).__name__, str(self))
87 87
88 88 def __eq__(self, other):
89 89 try:
90 90 return type(self) == type(other) and self._rev == other._rev
91 91 except AttributeError:
92 92 return False
93 93
94 94 def __ne__(self, other):
95 95 return not (self == other)
96 96
97 97 def __contains__(self, key):
98 98 return key in self._manifest
99 99
100 100 def __getitem__(self, key):
101 101 return self.filectx(key)
102 102
103 103 def __iter__(self):
104 104 for f in sorted(self._manifest):
105 105 yield f
106 106
107 107 def _manifestmatches(self, match, s):
108 108 """generate a new manifest filtered by the match argument
109 109
110 110 This method is for internal use only and mainly exists to provide an
111 111 object oriented way for other contexts to customize the manifest
112 112 generation.
113 113 """
114 114 return self.manifest().matches(match)
115 115
116 116 def _matchstatus(self, other, match):
117 117 """return match.always if match is none
118 118
119 119 This internal method provides a way for child objects to override the
120 120 match operator.
121 121 """
122 122 return match or matchmod.always(self._repo.root, self._repo.getcwd())
123 123
124 124 def _buildstatus(self, other, s, match, listignored, listclean,
125 125 listunknown):
126 126 """build a status with respect to another context"""
127 127 # Load earliest manifest first for caching reasons. More specifically,
128 128 # if you have revisions 1000 and 1001, 1001 is probably stored as a
129 129 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
130 130 # 1000 and cache it so that when you read 1001, we just need to apply a
131 131 # delta to what's in the cache. So that's one full reconstruction + one
132 132 # delta application.
133 133 if self.rev() is not None and self.rev() < other.rev():
134 134 self.manifest()
135 135 mf1 = other._manifestmatches(match, s)
136 136 mf2 = self._manifestmatches(match, s)
137 137
138 138 modified, added, clean = [], [], []
139 139 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
140 140 deletedset = set(deleted)
141 141 withflags = mf1.withflags() | mf2.withflags()
142 142 for fn, mf2node in mf2.iteritems():
143 143 if fn in mf1:
144 144 if (fn not in deletedset and
145 145 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
146 146 (mf1[fn] != mf2node and
147 147 (mf2node != _newnode or self[fn].cmp(other[fn]))))):
148 148 modified.append(fn)
149 149 elif listclean:
150 150 clean.append(fn)
151 151 del mf1[fn]
152 152 elif fn not in deletedset:
153 153 added.append(fn)
154 154 removed = mf1.keys()
155 155 if removed:
156 156 # need to filter files if they are already reported as removed
157 157 unknown = [fn for fn in unknown if fn not in mf1]
158 158 ignored = [fn for fn in ignored if fn not in mf1]
159 159
160 160 return scmutil.status(modified, added, removed, deleted, unknown,
161 161 ignored, clean)
162 162
163 163 @propertycache
164 164 def substate(self):
165 165 return subrepo.state(self, self._repo.ui)
166 166
167 167 def subrev(self, subpath):
168 168 return self.substate[subpath][1]
169 169
170 170 def rev(self):
171 171 return self._rev
172 172 def node(self):
173 173 return self._node
174 174 def hex(self):
175 175 return hex(self.node())
176 176 def manifest(self):
177 177 return self._manifest
178 178 def phasestr(self):
179 179 return phases.phasenames[self.phase()]
180 180 def mutable(self):
181 181 return self.phase() > phases.public
182 182
183 183 def getfileset(self, expr):
184 184 return fileset.getfileset(self, expr)
185 185
186 186 def obsolete(self):
187 187 """True if the changeset is obsolete"""
188 188 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
189 189
190 190 def extinct(self):
191 191 """True if the changeset is extinct"""
192 192 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
193 193
194 194 def unstable(self):
195 195 """True if the changeset is not obsolete but it's ancestor are"""
196 196 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
197 197
198 198 def bumped(self):
199 199 """True if the changeset try to be a successor of a public changeset
200 200
201 201 Only non-public and non-obsolete changesets may be bumped.
202 202 """
203 203 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
204 204
205 205 def divergent(self):
206 206 """Is a successors of a changeset with multiple possible successors set
207 207
208 208 Only non-public and non-obsolete changesets may be divergent.
209 209 """
210 210 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
211 211
212 212 def troubled(self):
213 213 """True if the changeset is either unstable, bumped or divergent"""
214 214 return self.unstable() or self.bumped() or self.divergent()
215 215
216 216 def troubles(self):
217 217 """return the list of troubles affecting this changesets.
218 218
219 219 Troubles are returned as strings. possible values are:
220 220 - unstable,
221 221 - bumped,
222 222 - divergent.
223 223 """
224 224 troubles = []
225 225 if self.unstable():
226 226 troubles.append('unstable')
227 227 if self.bumped():
228 228 troubles.append('bumped')
229 229 if self.divergent():
230 230 troubles.append('divergent')
231 231 return troubles
232 232
233 233 def parents(self):
234 234 """return contexts for each parent changeset"""
235 235 return self._parents
236 236
237 237 def p1(self):
238 238 return self._parents[0]
239 239
240 240 def p2(self):
241 241 if len(self._parents) == 2:
242 242 return self._parents[1]
243 243 return changectx(self._repo, -1)
244 244
245 245 def _fileinfo(self, path):
246 246 if '_manifest' in self.__dict__:
247 247 try:
248 248 return self._manifest[path], self._manifest.flags(path)
249 249 except KeyError:
250 250 raise error.ManifestLookupError(self._node, path,
251 251 _('not found in manifest'))
252 252 if '_manifestdelta' in self.__dict__ or path in self.files():
253 253 if path in self._manifestdelta:
254 254 return (self._manifestdelta[path],
255 255 self._manifestdelta.flags(path))
256 256 node, flag = self._repo.manifest.find(self._changeset[0], path)
257 257 if not node:
258 258 raise error.ManifestLookupError(self._node, path,
259 259 _('not found in manifest'))
260 260
261 261 return node, flag
262 262
263 263 def filenode(self, path):
264 264 return self._fileinfo(path)[0]
265 265
266 266 def flags(self, path):
267 267 try:
268 268 return self._fileinfo(path)[1]
269 269 except error.LookupError:
270 270 return ''
271 271
272 272 def sub(self, path):
273 273 return subrepo.subrepo(self, path)
274 274
275 275 def match(self, pats=[], include=None, exclude=None, default='glob'):
276 276 r = self._repo
277 277 return matchmod.match(r.root, r.getcwd(), pats,
278 278 include, exclude, default,
279 279 auditor=r.auditor, ctx=self)
280 280
281 281 def diff(self, ctx2=None, match=None, **opts):
282 282 """Returns a diff generator for the given contexts and matcher"""
283 283 if ctx2 is None:
284 284 ctx2 = self.p1()
285 285 if ctx2 is not None:
286 286 ctx2 = self._repo[ctx2]
287 287 diffopts = patch.diffopts(self._repo.ui, opts)
288 288 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
289 289
290 290 @propertycache
291 291 def _dirs(self):
292 292 return scmutil.dirs(self._manifest)
293 293
294 294 def dirs(self):
295 295 return self._dirs
296 296
297 297 def dirty(self, missing=False, merge=True, branch=True):
298 298 return False
299 299
300 300 def status(self, other=None, match=None, listignored=False,
301 301 listclean=False, listunknown=False, listsubrepos=False):
302 302 """return status of files between two nodes or node and working
303 303 directory.
304 304
305 305 If other is None, compare this node with working directory.
306 306
307 307 returns (modified, added, removed, deleted, unknown, ignored, clean)
308 308 """
309 309
310 310 ctx1 = self
311 311 ctx2 = self._repo[other]
312 312
313 313 # This next code block is, admittedly, fragile logic that tests for
314 314 # reversing the contexts and wouldn't need to exist if it weren't for
315 315 # the fast (and common) code path of comparing the working directory
316 316 # with its first parent.
317 317 #
318 318 # What we're aiming for here is the ability to call:
319 319 #
320 320 # workingctx.status(parentctx)
321 321 #
322 322 # If we always built the manifest for each context and compared those,
323 323 # then we'd be done. But the special case of the above call means we
324 324 # just copy the manifest of the parent.
325 325 reversed = False
326 326 if (not isinstance(ctx1, changectx)
327 327 and isinstance(ctx2, changectx)):
328 328 reversed = True
329 329 ctx1, ctx2 = ctx2, ctx1
330 330
331 331 match = ctx2._matchstatus(ctx1, match)
332 332 r = scmutil.status([], [], [], [], [], [], [])
333 333 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
334 334 listunknown)
335 335
336 336 if reversed:
337 337 # Reverse added and removed. Clear deleted, unknown and ignored as
338 338 # these make no sense to reverse.
339 339 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
340 340 r.clean)
341 341
342 342 if listsubrepos:
343 343 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
344 344 rev2 = ctx2.subrev(subpath)
345 345 try:
346 346 submatch = matchmod.narrowmatcher(subpath, match)
347 347 s = sub.status(rev2, match=submatch, ignored=listignored,
348 348 clean=listclean, unknown=listunknown,
349 349 listsubrepos=True)
350 350 for rfiles, sfiles in zip(r, s):
351 351 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
352 352 except error.LookupError:
353 353 self._repo.ui.status(_("skipping missing "
354 354 "subrepository: %s\n") % subpath)
355 355
356 356 for l in r:
357 357 l.sort()
358 358
359 359 return r
360 360
361 361
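To make the swap-and-reverse trick in status() concrete, a small self-contained sketch (plain dict manifests, hypothetical helper names) shows that swapping the operands and then exchanging added/removed yields the same result as the direct comparison:

    from collections import namedtuple
    Status = namedtuple('Status', 'modified added removed')

    def status_sketch(mf1, mf2):
        # mf1/mf2: {path: content} manifests for the two contexts
        modified = [f for f in mf2 if f in mf1 and mf1[f] != mf2[f]]
        added = [f for f in mf2 if f not in mf1]
        removed = [f for f in mf1 if f not in mf2]
        return Status(modified, added, removed)

    def status_reversed(mf1, mf2):
        r = status_sketch(mf2, mf1)                    # swap the operands...
        return Status(r.modified, r.removed, r.added)  # ...then swap added/removed

    old, new = {'a': '1', 'b': '1'}, {'a': '2', 'c': '1'}
    assert status_sketch(old, new) == status_reversed(old, new)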
362 362 def makememctx(repo, parents, text, user, date, branch, files, store,
363 363 editor=None):
364 364 def getfilectx(repo, memctx, path):
365 365 data, mode, copied = store.getfile(path)
366 366 if data is None:
367 367 return None
368 368 islink, isexec = mode
369 369 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
370 370 copied=copied, memctx=memctx)
371 371 extra = {}
372 372 if branch:
373 373 extra['branch'] = encoding.fromlocal(branch)
374 374 ctx = memctx(repo, parents, text, files, getfilectx, user,
375 375 date, extra, editor)
376 376 return ctx
377 377
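makememctx assumes a store object exposing getfile(path) -> (data, (islink, isexec), copied), with data=None meaning the file is absent. A minimal sketch of such a store, under that assumption (the class name is illustrative, not Mercurial API):

    class memstore_sketch(object):
        # Minimal object satisfying the store interface makememctx expects.
        def __init__(self, files):
            self._files = files   # path -> (data, (islink, isexec), copied)
        def getfile(self, path):
            return self._files.get(path, (None, (False, False), None))

    store = memstore_sketch({'a.txt': ('hello\n', (False, False), None)})
    data, mode, copied = store.getfile('a.txt')
    assert data == 'hello\n' and mode == (False, False)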
378 378 class changectx(basectx):
379 379 """A changecontext object makes access to data related to a particular
380 380 changeset convenient. It represents a read-only context already present in
381 381 the repo."""
382 382 def __init__(self, repo, changeid=''):
383 383 """changeid is a revision number, node, or tag"""
384 384
385 385 # since basectx.__new__ already took care of copying the object, we
386 386 # don't need to do anything in __init__, so we just exit here
387 387 if isinstance(changeid, basectx):
388 388 return
389 389
390 390 if changeid == '':
391 391 changeid = '.'
392 392 self._repo = repo
393 393
394 394 try:
395 395 if isinstance(changeid, int):
396 396 self._node = repo.changelog.node(changeid)
397 397 self._rev = changeid
398 398 return
399 399 if isinstance(changeid, long):
400 400 changeid = str(changeid)
401 401 if changeid == '.':
402 402 self._node = repo.dirstate.p1()
403 403 self._rev = repo.changelog.rev(self._node)
404 404 return
405 405 if changeid == 'null':
406 406 self._node = nullid
407 407 self._rev = nullrev
408 408 return
409 409 if changeid == 'tip':
410 410 self._node = repo.changelog.tip()
411 411 self._rev = repo.changelog.rev(self._node)
412 412 return
413 413 if len(changeid) == 20:
414 414 try:
415 415 self._node = changeid
416 416 self._rev = repo.changelog.rev(changeid)
417 417 return
418 418 except error.FilteredRepoLookupError:
419 419 raise
420 420 except LookupError:
421 421 pass
422 422
423 423 try:
424 424 r = int(changeid)
425 425 if str(r) != changeid:
426 426 raise ValueError
427 427 l = len(repo.changelog)
428 428 if r < 0:
429 429 r += l
430 430 if r < 0 or r >= l:
431 431 raise ValueError
432 432 self._rev = r
433 433 self._node = repo.changelog.node(r)
434 434 return
435 435 except error.FilteredIndexError:
436 436 raise
437 437 except (ValueError, OverflowError, IndexError):
438 438 pass
439 439
440 440 if len(changeid) == 40:
441 441 try:
442 442 self._node = bin(changeid)
443 443 self._rev = repo.changelog.rev(self._node)
444 444 return
445 445 except error.FilteredLookupError:
446 446 raise
447 447 except (TypeError, LookupError):
448 448 pass
449 449
450 450 # lookup bookmarks through the name interface
451 451 try:
452 452 self._node = repo.names.singlenode(repo, changeid)
453 453 self._rev = repo.changelog.rev(self._node)
454 454 return
455 455 except KeyError:
456 456 pass
457 457 except error.FilteredRepoLookupError:
458 458 raise
459 459 except error.RepoLookupError:
460 460 pass
461 461
462 462 self._node = repo.unfiltered().changelog._partialmatch(changeid)
463 463 if self._node is not None:
464 464 self._rev = repo.changelog.rev(self._node)
465 465 return
466 466
467 467 # lookup failed
468 468 # check if it might have come from damaged dirstate
469 469 #
470 470 # XXX we could avoid the unfiltered if we had a recognizable
471 471 # exception for filtered changeset access
472 472 if changeid in repo.unfiltered().dirstate.parents():
473 473 msg = _("working directory has unknown parent '%s'!")
474 474 raise error.Abort(msg % short(changeid))
475 475 try:
476 476 if len(changeid) == 20:
477 477 changeid = hex(changeid)
478 478 except TypeError:
479 479 pass
480 480 except (error.FilteredIndexError, error.FilteredLookupError,
481 481 error.FilteredRepoLookupError):
482 482 if repo.filtername == 'visible':
483 483 msg = _("hidden revision '%s'") % changeid
484 484 hint = _('use --hidden to access hidden revisions')
485 485 raise error.FilteredRepoLookupError(msg, hint=hint)
486 486 msg = _("filtered revision '%s' (not in '%s' subset)")
487 487 msg %= (changeid, repo.filtername)
488 488 raise error.FilteredRepoLookupError(msg)
489 489 except IndexError:
490 490 pass
491 491 raise error.RepoLookupError(
492 492 _("unknown revision '%s'") % changeid)
493 493
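The long cascade above defines the lookup order for a changeid. A hedged summary, as it would look through repo[...] indexing (the sample values are illustrative):

    # Lookup order implemented by changectx.__init__ (sketch, not exhaustive):
    #   repo[3]                  -> integer revision number
    #   repo['.']                -> first parent of the working directory
    #   repo['null']             -> the null revision
    #   repo['tip']              -> repository tip
    #   repo[<20-byte node>]     -> exact binary node
    #   repo['-1'] / repo['4']   -> revision number given as a string
    #   repo['a' * 40]           -> full 40-hex node
    #   repo['mybookmark']       -> names interface (bookmarks, tags, ...)
    #   repo['abc123']           -> unambiguous hex node prefix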
494 494 def __hash__(self):
495 495 try:
496 496 return hash(self._rev)
497 497 except AttributeError:
498 498 return id(self)
499 499
500 500 def __nonzero__(self):
501 501 return self._rev != nullrev
502 502
503 503 @propertycache
504 504 def _changeset(self):
505 505 return self._repo.changelog.read(self.rev())
506 506
507 507 @propertycache
508 508 def _manifest(self):
509 509 return self._repo.manifest.read(self._changeset[0])
510 510
511 511 @propertycache
512 512 def _manifestdelta(self):
513 513 return self._repo.manifest.readdelta(self._changeset[0])
514 514
515 515 @propertycache
516 516 def _parents(self):
517 517 p = self._repo.changelog.parentrevs(self._rev)
518 518 if p[1] == nullrev:
519 519 p = p[:-1]
520 520 return [changectx(self._repo, x) for x in p]
521 521
522 522 def changeset(self):
523 523 return self._changeset
524 524 def manifestnode(self):
525 525 return self._changeset[0]
526 526
527 527 def user(self):
528 528 return self._changeset[1]
529 529 def date(self):
530 530 return self._changeset[2]
531 531 def files(self):
532 532 return self._changeset[3]
533 533 def description(self):
534 534 return self._changeset[4]
535 535 def branch(self):
536 536 return encoding.tolocal(self._changeset[5].get("branch"))
537 537 def closesbranch(self):
538 538 return 'close' in self._changeset[5]
539 539 def extra(self):
540 540 return self._changeset[5]
541 541 def tags(self):
542 542 return self._repo.nodetags(self._node)
543 543 def bookmarks(self):
544 544 return self._repo.nodebookmarks(self._node)
545 545 def phase(self):
546 546 return self._repo._phasecache.phase(self._repo, self._rev)
547 547 def hidden(self):
548 548 return self._rev in repoview.filterrevs(self._repo, 'visible')
549 549
550 550 def children(self):
551 551 """return contexts for each child changeset"""
552 552 c = self._repo.changelog.children(self._node)
553 553 return [changectx(self._repo, x) for x in c]
554 554
555 555 def ancestors(self):
556 556 for a in self._repo.changelog.ancestors([self._rev]):
557 557 yield changectx(self._repo, a)
558 558
559 559 def descendants(self):
560 560 for d in self._repo.changelog.descendants([self._rev]):
561 561 yield changectx(self._repo, d)
562 562
563 563 def filectx(self, path, fileid=None, filelog=None):
564 564 """get a file context from this changeset"""
565 565 if fileid is None:
566 566 fileid = self.filenode(path)
567 567 return filectx(self._repo, path, fileid=fileid,
568 568 changectx=self, filelog=filelog)
569 569
570 570 def ancestor(self, c2, warn=False):
571 571 """return the "best" ancestor context of self and c2
572 572
573 573 If there are multiple candidates, it will show a message and check
574 574 merge.preferancestor configuration before falling back to the
575 575 revlog ancestor."""
576 576 # deal with workingctxs
577 577 n2 = c2._node
578 578 if n2 is None:
579 579 n2 = c2._parents[0]._node
580 580 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
581 581 if not cahs:
582 582 anc = nullid
583 583 elif len(cahs) == 1:
584 584 anc = cahs[0]
585 585 else:
586 586 for r in self._repo.ui.configlist('merge', 'preferancestor'):
587 587 try:
588 588 ctx = changectx(self._repo, r)
589 589 except error.RepoLookupError:
590 590 continue
591 591 anc = ctx.node()
592 592 if anc in cahs:
593 593 break
594 594 else:
595 595 anc = self._repo.changelog.ancestor(self._node, n2)
596 596 if warn:
597 597 self._repo.ui.status(
598 598 (_("note: using %s as ancestor of %s and %s\n") %
599 599 (short(anc), short(self._node), short(n2))) +
600 600 ''.join(_(" alternatively, use --config "
601 601 "merge.preferancestor=%s\n") %
602 602 short(n) for n in sorted(cahs) if n != anc))
603 603 return changectx(self._repo, anc)
604 604
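The candidate selection above reduces to a small policy: prefer a configured ancestor if it is among the common-ancestor heads, else fall back. A self-contained sketch of just that policy (names are illustrative, not Mercurial API):

    def pick_ancestor_sketch(cahs, preferred, fallback):
        # cahs: common-ancestor heads; preferred: merge.preferancestor, in order
        for cand in preferred:
            if cand in cahs:
                return cand
        return fallback            # the revlog ancestor in the real code

    assert pick_ancestor_sketch({'n1', 'n2'}, ['n3', 'n2'], 'n1') == 'n2'
    assert pick_ancestor_sketch({'n1', 'n2'}, [], 'n1') == 'n1'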
605 605 def descendant(self, other):
606 606 """True if other is descendant of this changeset"""
607 607 return self._repo.changelog.descendant(self._rev, other._rev)
608 608
609 609 def walk(self, match):
610 610 fset = set(match.files())
611 611 # for dirstate.walk, files=['.'] means "walk the whole tree".
612 612 # follow that here, too
613 613 fset.discard('.')
614 614
615 615 # avoid the entire walk if we're only looking for specific files
616 616 if fset and not match.anypats():
617 617 if util.all([fn in self for fn in fset]):
618 618 for fn in sorted(fset):
619 619 if match(fn):
620 620 yield fn
621 621 raise StopIteration
622 622
623 623 for fn in self:
624 624 if fn in fset:
625 625 # specified pattern is the exact name
626 626 fset.remove(fn)
627 627 if match(fn):
628 628 yield fn
629 629 for fn in sorted(fset):
630 630 if fn in self._dirs:
631 631 # specified pattern is a directory
632 632 continue
633 633 match.bad(fn, _('no such file in rev %s') % self)
634 634
635 635 def matches(self, match):
636 636 return self.walk(match)
637 637
638 638 class basefilectx(object):
639 639 """A filecontext object represents the common logic for its children:
640 640 filectx: read-only access to a filerevision that is already present
641 641 in the repo,
642 642 workingfilectx: a filecontext that represents files from the working
643 643 directory,
644 644 memfilectx: a filecontext that represents files in-memory."""
645 645 def __new__(cls, repo, path, *args, **kwargs):
646 646 return super(basefilectx, cls).__new__(cls)
647 647
648 648 @propertycache
649 649 def _filelog(self):
650 650 return self._repo.file(self._path)
651 651
652 652 @propertycache
653 653 def _changeid(self):
654 654 if '_changeid' in self.__dict__:
655 655 return self._changeid
656 656 elif '_changectx' in self.__dict__:
657 657 return self._changectx.rev()
658 658 else:
659 659 return self._filelog.linkrev(self._filerev)
660 660
661 661 @propertycache
662 662 def _filenode(self):
663 663 if '_fileid' in self.__dict__:
664 664 return self._filelog.lookup(self._fileid)
665 665 else:
666 666 return self._changectx.filenode(self._path)
667 667
668 668 @propertycache
669 669 def _filerev(self):
670 670 return self._filelog.rev(self._filenode)
671 671
672 672 @propertycache
673 673 def _repopath(self):
674 674 return self._path
675 675
676 676 def __nonzero__(self):
677 677 try:
678 678 self._filenode
679 679 return True
680 680 except error.LookupError:
681 681 # file is missing
682 682 return False
683 683
684 684 def __str__(self):
685 685 return "%s@%s" % (self.path(), self._changectx)
686 686
687 687 def __repr__(self):
688 688 return "<%s %s>" % (type(self).__name__, str(self))
689 689
690 690 def __hash__(self):
691 691 try:
692 692 return hash((self._path, self._filenode))
693 693 except AttributeError:
694 694 return id(self)
695 695
696 696 def __eq__(self, other):
697 697 try:
698 698 return (type(self) == type(other) and self._path == other._path
699 699 and self._filenode == other._filenode)
700 700 except AttributeError:
701 701 return False
702 702
703 703 def __ne__(self, other):
704 704 return not (self == other)
705 705
706 706 def filerev(self):
707 707 return self._filerev
708 708 def filenode(self):
709 709 return self._filenode
710 710 def flags(self):
711 711 return self._changectx.flags(self._path)
712 712 def filelog(self):
713 713 return self._filelog
714 714 def rev(self):
715 715 return self._changeid
716 716 def linkrev(self):
717 717 return self._filelog.linkrev(self._filerev)
718 718 def node(self):
719 719 return self._changectx.node()
720 720 def hex(self):
721 721 return self._changectx.hex()
722 722 def user(self):
723 723 return self._changectx.user()
724 724 def date(self):
725 725 return self._changectx.date()
726 726 def files(self):
727 727 return self._changectx.files()
728 728 def description(self):
729 729 return self._changectx.description()
730 730 def branch(self):
731 731 return self._changectx.branch()
732 732 def extra(self):
733 733 return self._changectx.extra()
734 734 def phase(self):
735 735 return self._changectx.phase()
736 736 def phasestr(self):
737 737 return self._changectx.phasestr()
738 738 def manifest(self):
739 739 return self._changectx.manifest()
740 740 def changectx(self):
741 741 return self._changectx
742 742
743 743 def path(self):
744 744 return self._path
745 745
746 746 def isbinary(self):
747 747 try:
748 748 return util.binary(self.data())
749 749 except IOError:
750 750 return False
751 751 def isexec(self):
752 752 return 'x' in self.flags()
753 753 def islink(self):
754 754 return 'l' in self.flags()
755 755
756 756 def cmp(self, fctx):
757 757 """compare with other file context
758 758
759 759 returns True if different than fctx.
760 760 """
761 761 if (fctx._filerev is None
762 762 and (self._repo._encodefilterpats
763 763 # if file data starts with '\1\n', empty metadata block is
764 764 # prepended, which adds 4 bytes to filelog.size().
765 765 or self.size() - 4 == fctx.size())
766 766 or self.size() == fctx.size()):
767 767 return self._filelog.cmp(self._filenode, fctx.data())
768 768
769 769 return True
770 770
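The guard in cmp above is a size-based fast path: file data is only read and compared when the sizes could still match, allowing 4 bytes of slack for the '\1\n' metadata header prepended when encode filters are configured. A toy version of only the size check (hypothetical helper name, simplified from the condition above):

    def maybe_equal_sketch(stored_size, other_size, filtered):
        return (stored_size == other_size
                or (filtered and stored_size - 4 == other_size))

    assert not maybe_equal_sketch(10, 7, False)  # sizes differ: surely changed
    assert maybe_equal_sketch(11, 7, True)       # header slack: must compare data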
771 771 def introrev(self):
772 772 """return the rev of the changeset which introduced this file revision
773 773
774 774 This method is different from linkrev because it takes into account the
775 775 changeset the filectx was created from. It ensures the returned
776 776 revision is one of its ancestors. This prevents bugs from
777 777 'linkrev-shadowing' when a file revision is used by multiple
778 778 changesets.
779 779 """
780 780 lkr = self.linkrev()
781 781 attrs = vars(self)
782 782 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
783 783 if noctx or self.rev() == lkr:
784 784 return self.linkrev()
785 785 return _adjustlinkrev(self._repo, self._path, self._filelog,
786 786 self._filenode, self.rev(), inclusive=True)
787 787
788 788 def parents(self):
789 789 _path = self._path
790 790 fl = self._filelog
791 791 parents = self._filelog.parents(self._filenode)
792 792 pl = [(_path, node, fl) for node in parents if node != nullid]
793 793
794 794 r = fl.renamed(self._filenode)
795 795 if r:
796 796 # - In the simple rename case, both parents are nullid, pl is empty.
797 797 # - In case of merge, only one of the parents is nullid and should
798 798 # be replaced with the rename information. This parent is -always-
799 799 # the first one.
800 800 #
801 801 # As nullid has always been filtered out in the previous list
802 802 # comprehension, inserting at 0 will always result in replacing the
803 803 # first nullid parent with the rename information.
804 804 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
805 805
806 806 ret = []
807 807 for path, fnode, l in pl:
808 808 if '_changeid' in vars(self) or '_changectx' in vars(self):
809 809 # If self is associated with a changeset (probably explicitly
810 810 # fed), ensure the created filectx is associated with a
811 811 # changeset that is an ancestor of self.changectx.
812 812 rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
813 813 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
814 814 changeid=rev)
815 815 else:
816 816 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
817 817 ret.append(fctx)
818 818 return ret
819 819
820 820 def p1(self):
821 821 return self.parents()[0]
822 822
823 823 def p2(self):
824 824 p = self.parents()
825 825 if len(p) == 2:
826 826 return p[1]
827 827 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
828 828
829 829 def annotate(self, follow=False, linenumber=None, diffopts=None):
830 830 '''returns a list of tuples of (ctx, line) for each line
831 831 in the file, where ctx is the filectx of the node where
832 832 that line was last changed.
833 833 If the "linenumber" parameter is not None, this instead returns
834 834 tuples of ((ctx, linenumber), line) for each line, where
835 835 linenumber is the line's number at its first appearance in the
836 836 managed file.
837 837 To reduce annotation cost, if the "linenumber" parameter is
838 838 False, a fixed value (False) is returned as the linenumber
839 839 instead.'''
840 840
841 841 if linenumber is None:
842 842 def decorate(text, rev):
843 843 return ([rev] * len(text.splitlines()), text)
844 844 elif linenumber:
845 845 def decorate(text, rev):
846 846 size = len(text.splitlines())
847 847 return ([(rev, i) for i in xrange(1, size + 1)], text)
848 848 else:
849 849 def decorate(text, rev):
850 850 return ([(rev, False)] * len(text.splitlines()), text)
851 851
852 852 def pair(parent, child):
853 853 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
854 854 refine=True)
855 855 for (a1, a2, b1, b2), t in blocks:
856 856 # Changed blocks ('!') or blocks made only of blank lines ('~')
857 857 # belong to the child.
858 858 if t == '=':
859 859 child[0][b1:b2] = parent[0][a1:a2]
860 860 return child
861 861
862 862 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
863 863
864 864 def parents(f):
865 865 pl = f.parents()
866 866
867 867 # Don't return renamed parents if we aren't following.
868 868 if not follow:
869 869 pl = [p for p in pl if p.path() == f.path()]
870 870
871 871 # renamed filectx won't have a filelog yet, so set it
872 872 # from the cache to save time
873 873 for p in pl:
874 874 if not '_filelog' in p.__dict__:
875 875 p._filelog = getlog(p.path())
876 876
877 877 return pl
878 878
879 879 # use linkrev to find the first changeset where self appeared
880 880 base = self
881 881 introrev = self.introrev()
882 882 if self.rev() != introrev:
883 883 base = filectx(self._repo, self._path, filelog=self.filelog(),
884 884 fileid=self.filenode(), changeid=introrev)
885 885
886 886 # This algorithm would prefer to be recursive, but Python is a
887 887 # bit recursion-hostile. Instead we do an iterative
888 888 # depth-first search.
889 889
890 890 visit = [base]
891 891 hist = {}
892 892 pcache = {}
893 893 needed = {base: 1}
894 894 while visit:
895 895 f = visit[-1]
896 896 pcached = f in pcache
897 897 if not pcached:
898 898 pcache[f] = parents(f)
899 899
900 900 ready = True
901 901 pl = pcache[f]
902 902 for p in pl:
903 903 if p not in hist:
904 904 ready = False
905 905 visit.append(p)
906 906 if not pcached:
907 907 needed[p] = needed.get(p, 0) + 1
908 908 if ready:
909 909 visit.pop()
910 910 reusable = f in hist
911 911 if reusable:
912 912 curr = hist[f]
913 913 else:
914 914 curr = decorate(f.data(), f)
915 915 for p in pl:
916 916 if not reusable:
917 917 curr = pair(hist[p], curr)
918 918 if needed[p] == 1:
919 919 del hist[p]
920 920 del needed[p]
921 921 else:
922 922 needed[p] -= 1
923 923
924 924 hist[f] = curr
925 925 pcache[f] = []
926 926
927 927 return zip(hist[base][0], hist[base][1].splitlines(True))
928 928
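The heart of annotate is the pair() merge above: lines that survive unchanged from a parent keep the parent's annotation, while everything else stays attributed to the child. A self-contained sketch of that step, using difflib in place of mdiff (illustrative only):

    import difflib

    def pair_sketch(parent, child):
        # parent/child: (annotations, lines) with one annotation per line
        sm = difflib.SequenceMatcher(None, parent[1], child[1])
        for tag, a1, a2, b1, b2 in sm.get_opcodes():
            if tag == 'equal':
                child[0][b1:b2] = parent[0][a1:a2]  # unchanged: keep parent's
        return child

    p = (['r1', 'r1'], ['a\n', 'b\n'])
    c = (['r2', 'r2', 'r2'], ['a\n', 'x\n', 'b\n'])
    assert pair_sketch(p, c)[0] == ['r1', 'r2', 'r1']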
929 929 def ancestors(self, followfirst=False):
930 930 visit = {}
931 931 c = self
932 932 cut = followfirst and 1 or None
933 933 while True:
934 934 for parent in c.parents()[:cut]:
935 935 visit[(parent.rev(), parent.node())] = parent
936 936 if not visit:
937 937 break
938 938 c = visit.pop(max(visit))
939 939 yield c
940 940
941 941 class filectx(basefilectx):
942 942 """A filecontext object makes access to data related to a particular
943 943 filerevision convenient."""
944 944 def __init__(self, repo, path, changeid=None, fileid=None,
945 945 filelog=None, changectx=None):
946 946 """changeid can be a changeset revision, node, or tag.
947 947 fileid can be a file revision or node."""
948 948 self._repo = repo
949 949 self._path = path
950 950
951 951 assert (changeid is not None
952 952 or fileid is not None
953 953 or changectx is not None), \
954 954 ("bad args: changeid=%r, fileid=%r, changectx=%r"
955 955 % (changeid, fileid, changectx))
956 956
957 957 if filelog is not None:
958 958 self._filelog = filelog
959 959
960 960 if changeid is not None:
961 961 self._changeid = changeid
962 962 if changectx is not None:
963 963 self._changectx = changectx
964 964 if fileid is not None:
965 965 self._fileid = fileid
966 966
967 967 @propertycache
968 968 def _changectx(self):
969 969 try:
970 970 return changectx(self._repo, self._changeid)
971 971 except error.FilteredRepoLookupError:
972 972 # A linkrev may point to any revision in the repository. When the
973 973 # repository is filtered, this may lead to `filectx` trying to build
974 974 # a `changectx` for a filtered revision. In such a case we fall back
975 975 # to creating a `changectx` on the unfiltered version of the
976 976 # repository. This fallback should not be an issue because
977 977 # `changectx` objects from `filectx` are not used in complex
978 978 # operations that care about filtering.
979 979 #
980 980 # This fallback is a cheap and dirty fix that prevents several
981 981 # crashes. It does not ensure the behavior is correct. However, the
982 982 # behavior was not correct before filtering either, and "incorrect
983 983 # behavior" is seen as better than a crash.
984 984 #
985 985 # Linkrevs have several serious troubles with filtering that are
986 986 # complicated to solve. Proper handling of the issue here should be
987 987 # considered when solutions to the linkrev issue are on the table.
988 988 return changectx(self._repo.unfiltered(), self._changeid)
989 989
990 990 def filectx(self, fileid):
991 991 '''opens an arbitrary revision of the file without
992 992 opening a new filelog'''
993 993 return filectx(self._repo, self._path, fileid=fileid,
994 994 filelog=self._filelog)
995 995
996 996 def data(self):
997 997 try:
998 998 return self._filelog.read(self._filenode)
999 999 except error.CensoredNodeError:
1000 1000 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1001 1001 return ""
1002 1002 raise util.Abort(_("censored node: %s") % short(self._filenode),
1003 1003 hint=_("set censor.policy to ignore errors"))
1004 1004
1005 1005 def size(self):
1006 1006 return self._filelog.size(self._filerev)
1007 1007
1008 1008 def renamed(self):
1009 1009 """check if file was actually renamed in this changeset revision
1010 1010
1011 1011 If a rename is logged in the file revision, we report the copy for the
1012 1012 changeset only if the file revision's linkrev points back to the changeset
1013 1013 in question or both changeset parents contain different file revisions.
1014 1014 """
1015 1015
1016 1016 renamed = self._filelog.renamed(self._filenode)
1017 1017 if not renamed:
1018 1018 return renamed
1019 1019
1020 1020 if self.rev() == self.linkrev():
1021 1021 return renamed
1022 1022
1023 1023 name = self.path()
1024 1024 fnode = self._filenode
1025 1025 for p in self._changectx.parents():
1026 1026 try:
1027 1027 if fnode == p.filenode(name):
1028 1028 return None
1029 1029 except error.LookupError:
1030 1030 pass
1031 1031 return renamed
1032 1032
1033 1033 def children(self):
1034 1034 # hard for renames
1035 1035 c = self._filelog.children(self._filenode)
1036 1036 return [filectx(self._repo, self._path, fileid=x,
1037 1037 filelog=self._filelog) for x in c]
1038 1038
1039 1039 class committablectx(basectx):
1040 1040 """A committablectx object provides common functionality for a context that
1041 1041 wants the ability to commit, e.g. workingctx or memctx."""
1042 1042 def __init__(self, repo, text="", user=None, date=None, extra=None,
1043 1043 changes=None):
1044 1044 self._repo = repo
1045 1045 self._rev = None
1046 1046 self._node = None
1047 1047 self._text = text
1048 1048 if date:
1049 1049 self._date = util.parsedate(date)
1050 1050 if user:
1051 1051 self._user = user
1052 1052 if changes:
1053 1053 self._status = changes
1054 1054
1055 1055 self._extra = {}
1056 1056 if extra:
1057 1057 self._extra = extra.copy()
1058 1058 if 'branch' not in self._extra:
1059 1059 try:
1060 1060 branch = encoding.fromlocal(self._repo.dirstate.branch())
1061 1061 except UnicodeDecodeError:
1062 1062 raise util.Abort(_('branch name not in UTF-8!'))
1063 1063 self._extra['branch'] = branch
1064 1064 if self._extra['branch'] == '':
1065 1065 self._extra['branch'] = 'default'
1066 1066
1067 1067 def __str__(self):
1068 1068 return str(self._parents[0]) + "+"
1069 1069
1070 1070 def __nonzero__(self):
1071 1071 return True
1072 1072
1073 1073 def _buildflagfunc(self):
1074 1074 # Create a fallback function for getting file flags when the
1075 1075 # filesystem doesn't support them
1076 1076
1077 1077 copiesget = self._repo.dirstate.copies().get
1078 1078
1079 1079 if len(self._parents) < 2:
1080 1080 # when we have one parent, it's easy: copy from parent
1081 1081 man = self._parents[0].manifest()
1082 1082 def func(f):
1083 1083 f = copiesget(f, f)
1084 1084 return man.flags(f)
1085 1085 else:
1086 1086 # merges are tricky: we try to reconstruct the unstored
1087 1087 # result from the merge (issue1802)
1088 1088 p1, p2 = self._parents
1089 1089 pa = p1.ancestor(p2)
1090 1090 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1091 1091
1092 1092 def func(f):
1093 1093 f = copiesget(f, f) # may be wrong for merges with copies
1094 1094 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1095 1095 if fl1 == fl2:
1096 1096 return fl1
1097 1097 if fl1 == fla:
1098 1098 return fl2
1099 1099 if fl2 == fla:
1100 1100 return fl1
1101 1101 return '' # punt for conflicts
1102 1102
1103 1103 return func
1104 1104
1105 1105 @propertycache
1106 1106 def _flagfunc(self):
1107 1107 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1108 1108
1109 1109 @propertycache
1110 1110 def _manifest(self):
1111 1111 """generate a manifest corresponding to the values in self._status
1112 1112
1113 1113 This reuses the file nodeid from the parent, but appends an extra
1114 1114 letter when modified. Modified files get an extra 'm' while added files
1115 1115 get an extra 'a'. This is used by manifest merge to see that files
1116 1116 are different and by the update logic to avoid deleting newly added files.
1117 1117 """
1118 1118
1119 1119 man1 = self._parents[0].manifest()
1120 1120 man = man1.copy()
1121 1121 if len(self._parents) > 1:
1122 1122 man2 = self.p2().manifest()
1123 1123 def getman(f):
1124 1124 if f in man1:
1125 1125 return man1
1126 1126 return man2
1127 1127 else:
1128 1128 getman = lambda f: man1
1129 1129
1130 1130 copied = self._repo.dirstate.copies()
1131 1131 ff = self._flagfunc
1132 1132 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1133 1133 for f in l:
1134 1134 orig = copied.get(f, f)
1135 1135 man[f] = getman(orig).get(orig, nullid) + i
1136 1136 try:
1137 1137 man.setflag(f, ff(f))
1138 1138 except OSError:
1139 1139 pass
1140 1140
1141 1141 for f in self._status.deleted + self._status.removed:
1142 1142 if f in man:
1143 1143 del man[f]
1144 1144
1145 1145 return man
1146 1146
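A small concrete illustration of the node convention used above: dirty files in a working-copy manifest get the 21-byte nodes mentioned at the top of this file, built by appending 'a' or 'm' to a 20-byte node (the node values here are made up):

    parent_node = '\x11' * 20          # node of the file in p1 (illustrative)
    added_node = '\x00' * 20 + 'a'     # newly added file: nullid + 'a'
    modified_node = parent_node + 'm'  # modified file: parent's node + 'm'
    assert len(modified_node) == 21 and modified_node[:20] == parent_node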
1147 1147 @propertycache
1148 1148 def _status(self):
1149 1149 return self._repo.status()
1150 1150
1151 1151 @propertycache
1152 1152 def _user(self):
1153 1153 return self._repo.ui.username()
1154 1154
1155 1155 @propertycache
1156 1156 def _date(self):
1157 1157 return util.makedate()
1158 1158
1159 1159 def subrev(self, subpath):
1160 1160 return None
1161 1161
1162 1162 def user(self):
1163 1163 return self._user or self._repo.ui.username()
1164 1164 def date(self):
1165 1165 return self._date
1166 1166 def description(self):
1167 1167 return self._text
1168 1168 def files(self):
1169 1169 return sorted(self._status.modified + self._status.added +
1170 1170 self._status.removed)
1171 1171
1172 1172 def modified(self):
1173 1173 return self._status.modified
1174 1174 def added(self):
1175 1175 return self._status.added
1176 1176 def removed(self):
1177 1177 return self._status.removed
1178 1178 def deleted(self):
1179 1179 return self._status.deleted
1180 1180 def branch(self):
1181 1181 return encoding.tolocal(self._extra['branch'])
1182 1182 def closesbranch(self):
1183 1183 return 'close' in self._extra
1184 1184 def extra(self):
1185 1185 return self._extra
1186 1186
1187 1187 def tags(self):
1188 1188 t = []
1189 1189 for p in self.parents():
1190 1190 t.extend(p.tags())
1191 1191 return t
1192 1192
1193 1193 def bookmarks(self):
1194 1194 b = []
1195 1195 for p in self.parents():
1196 1196 b.extend(p.bookmarks())
1197 1197 return b
1198 1198
1199 1199 def phase(self):
1200 1200 phase = phases.draft # default phase to draft
1201 1201 for p in self.parents():
1202 1202 phase = max(phase, p.phase())
1203 1203 return phase
1204 1204
1205 1205 def hidden(self):
1206 1206 return False
1207 1207
1208 1208 def children(self):
1209 1209 return []
1210 1210
1211 1211 def flags(self, path):
1212 1212 if '_manifest' in self.__dict__:
1213 1213 try:
1214 1214 return self._manifest.flags(path)
1215 1215 except KeyError:
1216 1216 return ''
1217 1217
1218 1218 try:
1219 1219 return self._flagfunc(path)
1220 1220 except OSError:
1221 1221 return ''
1222 1222
1223 1223 def ancestor(self, c2):
1224 1224 """return the "best" ancestor context of self and c2"""
1225 1225 return self._parents[0].ancestor(c2) # punt on two parents for now
1226 1226
1227 1227 def walk(self, match):
1228 1228 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1229 1229 True, False))
1230 1230
1231 1231 def matches(self, match):
1232 1232 return sorted(self._repo.dirstate.matches(match))
1233 1233
1234 1234 def ancestors(self):
1235 1235 for p in self._parents:
1236 1236 yield p
1237 1237 for a in self._repo.changelog.ancestors(
1238 1238 [p.rev() for p in self._parents]):
1239 1239 yield changectx(self._repo, a)
1240 1240
1241 1241 def markcommitted(self, node):
1242 1242 """Perform post-commit cleanup necessary after committing this ctx
1243 1243
1244 1244 Specifically, this updates backing stores this working context
1245 1245 wraps to reflect the fact that the changes reflected by this
1246 1246 workingctx have been committed. For example, it marks
1247 1247 modified and added files as normal in the dirstate.
1248 1248
1249 1249 """
1250 1250
1251 1251 self._repo.dirstate.beginparentchange()
1252 1252 for f in self.modified() + self.added():
1253 1253 self._repo.dirstate.normal(f)
1254 1254 for f in self.removed():
1255 1255 self._repo.dirstate.drop(f)
1256 1256 self._repo.dirstate.setparents(node)
1257 1257 self._repo.dirstate.endparentchange()
1258 1258
1259 1259 def dirs(self):
1260 1260 return self._repo.dirstate.dirs()
1261 1261
1262 1262 class workingctx(committablectx):
1263 1263 """A workingctx object makes access to data related to
1264 1264 the current working directory convenient.
1265 1265 date - any valid date string or (unixtime, offset), or None.
1266 1266 user - username string, or None.
1267 1267 extra - a dictionary of extra values, or None.
1268 1268 changes - a list of file lists as returned by localrepo.status()
1269 1269 or None to use the repository status.
1270 1270 """
1271 1271 def __init__(self, repo, text="", user=None, date=None, extra=None,
1272 1272 changes=None):
1273 1273 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1274 1274
1275 1275 def __iter__(self):
1276 1276 d = self._repo.dirstate
1277 1277 for f in d:
1278 1278 if d[f] != 'r':
1279 1279 yield f
1280 1280
1281 1281 def __contains__(self, key):
1282 1282 return self._repo.dirstate[key] not in "?r"
1283 1283
1284 1284 @propertycache
1285 1285 def _parents(self):
1286 1286 p = self._repo.dirstate.parents()
1287 1287 if p[1] == nullid:
1288 1288 p = p[:-1]
1289 1289 return [changectx(self._repo, x) for x in p]
1290 1290
1291 1291 def filectx(self, path, filelog=None):
1292 1292 """get a file context from the working directory"""
1293 1293 return workingfilectx(self._repo, path, workingctx=self,
1294 1294 filelog=filelog)
1295 1295
1296 1296 def dirty(self, missing=False, merge=True, branch=True):
1297 1297 "check whether a working directory is modified"
1298 1298 # check subrepos first
1299 1299 for s in sorted(self.substate):
1300 1300 if self.sub(s).dirty():
1301 1301 return True
1302 1302 # check current working dir
1303 1303 return ((merge and self.p2()) or
1304 1304 (branch and self.branch() != self.p1().branch()) or
1305 1305 self.modified() or self.added() or self.removed() or
1306 1306 (missing and self.deleted()))
1307 1307
1308 1308 def add(self, list, prefix=""):
1309 1309 join = lambda f: os.path.join(prefix, f)
1310 1310 wlock = self._repo.wlock()
1311 1311 ui, ds = self._repo.ui, self._repo.dirstate
1312 1312 try:
1313 1313 rejected = []
1314 1314 lstat = self._repo.wvfs.lstat
1315 1315 for f in list:
1316 1316 scmutil.checkportable(ui, join(f))
1317 1317 try:
1318 1318 st = lstat(f)
1319 1319 except OSError:
1320 1320 ui.warn(_("%s does not exist!\n") % join(f))
1321 1321 rejected.append(f)
1322 1322 continue
1323 1323 if st.st_size > 10000000:
1324 1324 ui.warn(_("%s: up to %d MB of RAM may be required "
1325 1325 "to manage this file\n"
1326 1326 "(use 'hg revert %s' to cancel the "
1327 1327 "pending addition)\n")
1328 1328 % (f, 3 * st.st_size // 1000000, join(f)))
1329 1329 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1330 1330 ui.warn(_("%s not added: only files and symlinks "
1331 1331 "supported currently\n") % join(f))
1332 1332 rejected.append(f)
1333 1333 elif ds[f] in 'amn':
1334 1334 ui.warn(_("%s already tracked!\n") % join(f))
1335 1335 elif ds[f] == 'r':
1336 1336 ds.normallookup(f)
1337 1337 else:
1338 1338 ds.add(f)
1339 1339 return rejected
1340 1340 finally:
1341 1341 wlock.release()
1342 1342
1343 1343 def forget(self, files, prefix=""):
1344 1344 join = lambda f: os.path.join(prefix, f)
1345 1345 wlock = self._repo.wlock()
1346 1346 try:
1347 1347 rejected = []
1348 1348 for f in files:
1349 1349 if f not in self._repo.dirstate:
1350 1350 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1351 1351 rejected.append(f)
1352 1352 elif self._repo.dirstate[f] != 'a':
1353 1353 self._repo.dirstate.remove(f)
1354 1354 else:
1355 1355 self._repo.dirstate.drop(f)
1356 1356 return rejected
1357 1357 finally:
1358 1358 wlock.release()
1359 1359
1360 1360 def undelete(self, list):
1361 1361 pctxs = self.parents()
1362 1362 wlock = self._repo.wlock()
1363 1363 try:
1364 1364 for f in list:
1365 1365 if self._repo.dirstate[f] != 'r':
1366 1366 self._repo.ui.warn(_("%s not removed!\n") % f)
1367 1367 else:
1368 1368 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1369 1369 t = fctx.data()
1370 1370 self._repo.wwrite(f, t, fctx.flags())
1371 1371 self._repo.dirstate.normal(f)
1372 1372 finally:
1373 1373 wlock.release()
1374 1374
1375 1375 def copy(self, source, dest):
1376 1376 try:
1377 1377 st = self._repo.wvfs.lstat(dest)
1378 1378 except OSError, err:
1379 1379 if err.errno != errno.ENOENT:
1380 1380 raise
1381 1381 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1382 1382 return
1383 1383 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1384 1384 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1385 1385 "symbolic link\n") % dest)
1386 1386 else:
1387 1387 wlock = self._repo.wlock()
1388 1388 try:
1389 1389 if self._repo.dirstate[dest] in '?':
1390 1390 self._repo.dirstate.add(dest)
1391 1391 elif self._repo.dirstate[dest] in 'r':
1392 1392 self._repo.dirstate.normallookup(dest)
1393 1393 self._repo.dirstate.copy(source, dest)
1394 1394 finally:
1395 1395 wlock.release()
1396 1396
1397 1397 def _filtersuspectsymlink(self, files):
1398 1398 if not files or self._repo.dirstate._checklink:
1399 1399 return files
1400 1400
1401 1401 # Symlink placeholders may get non-symlink-like contents
1402 1402 # via user error or dereferencing by NFS or Samba servers,
1403 1403 # so we filter out any placeholders that don't look like a
1404 1404 # symlink
1405 1405 sane = []
1406 1406 for f in files:
1407 1407 if self.flags(f) == 'l':
1408 1408 d = self[f].data()
1409 1409 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1410 1410 self._repo.ui.debug('ignoring suspect symlink placeholder'
1411 1411 ' "%s"\n' % f)
1412 1412 continue
1413 1413 sane.append(f)
1414 1414 return sane
1415 1415
1416 1416 def _checklookup(self, files):
1417 1417 # check for any possibly clean files
1418 1418 if not files:
1419 1419 return [], []
1420 1420
1421 1421 modified = []
1422 1422 fixup = []
1423 1423 pctx = self._parents[0]
1424 1424 # do a full compare of any files that might have changed
1425 1425 for f in sorted(files):
1426 1426 if (f not in pctx or self.flags(f) != pctx.flags(f)
1427 1427 or pctx[f].cmp(self[f])):
1428 1428 modified.append(f)
1429 1429 else:
1430 1430 fixup.append(f)
1431 1431
1432 1432 # update dirstate for files that are actually clean
1433 1433 if fixup:
1434 1434 try:
1435 1435 # updating the dirstate is optional
1436 1436 # so we don't wait on the lock
1437 1437 # wlock can invalidate the dirstate, so cache normal _after_
1438 1438 # taking the lock
1439 1439 wlock = self._repo.wlock(False)
1440 1440 normal = self._repo.dirstate.normal
1441 1441 try:
1442 1442 for f in fixup:
1443 1443 normal(f)
1444 1444 finally:
1445 1445 wlock.release()
1446 1446 except error.LockError:
1447 1447 pass
1448 1448 return modified, fixup
1449 1449
1450 1450 def _manifestmatches(self, match, s):
1451 1451 """Slow path for workingctx
1452 1452
1453 1453 The fast path covers comparing the working directory to its parent,
1454 1454 which means this function is only used when comparing with a non-parent;
1455 1455 therefore we need to build a manifest and return what matches.
1456 1456 """
1457 1457 mf = self._repo['.']._manifestmatches(match, s)
1458 1458 for f in s.modified + s.added:
1459 1459 mf[f] = _newnode
1460 1460 mf.setflag(f, self.flags(f))
1461 1461 for f in s.removed:
1462 1462 if f in mf:
1463 1463 del mf[f]
1464 1464 return mf
1465 1465
1466 1466 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1467 1467 unknown=False):
1468 1468 '''Gets the status from the dirstate -- internal use only.'''
1469 1469 listignored, listclean, listunknown = ignored, clean, unknown
1470 1470 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1471 1471 subrepos = []
1472 1472 if '.hgsub' in self:
1473 1473 subrepos = sorted(self.substate)
1474 1474 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1475 1475 listclean, listunknown)
1476 1476
1477 1477 # check for any possibly clean files
1478 1478 if cmp:
1479 1479 modified2, fixup = self._checklookup(cmp)
1480 1480 s.modified.extend(modified2)
1481 1481
1482 1482 # update dirstate for files that are actually clean
1483 1483 if fixup and listclean:
1484 1484 s.clean.extend(fixup)
1485 1485
1486 1486 return s
1487 1487
1488 1488 def _buildstatus(self, other, s, match, listignored, listclean,
1489 1489 listunknown):
1490 1490 """build a status with respect to another context
1491 1491
1492 1492 This includes logic for maintaining the fast path of status when
1493 1493 comparing the working directory against its parent, which is to skip
1494 1494 building a new manifest if self (working directory) is not comparing
1495 1495 against its parent (repo['.']).
1496 1496 """
1497 1497 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1498 1498 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1499 1499 # might have accidentally ended up with the entire contents of the file
1500 1500 # they are supposed to be linking to.
1501 1501 s.modified[:] = self._filtersuspectsymlink(s.modified)
1502 1502 if other != self._repo['.']:
1503 1503 s = super(workingctx, self)._buildstatus(other, s, match,
1504 1504 listignored, listclean,
1505 1505 listunknown)
1506 1506 elif match.always():
1507 1507 # cache for performance
1508 1508 if s.unknown or s.ignored or s.clean:
1509 1509 # "_status" is cached with list*=False in the normal route
1510 1510 self._status = scmutil.status(s.modified, s.added, s.removed,
1511 1511 s.deleted, [], [], [])
1512 1512 else:
1513 1513 self._status = s
1514 1514 return s
1515 1515
1516 1516 def _matchstatus(self, other, match):
1517 1517 """override the match method with a filter for directory patterns
1518 1518
1519 1519 We use inheritance to customize the match.bad method only in the case
1520 1520 of workingctx, since it belongs only to the working directory when
1521 1521 comparing against the parent changeset.
1522 1522
1523 1523 If we aren't comparing against the working directory's parent, then we
1524 1524 just use the default match object sent to us.
1525 1525 """
1526 1526 superself = super(workingctx, self)
1527 1527 match = superself._matchstatus(other, match)
1528 1528 if other != self._repo['.']:
1529 1529 def bad(f, msg):
1530 1530 # 'f' may be a directory pattern from 'match.files()',
1531 1531 # so 'f not in ctx1' is not enough
1532 1532 if f not in other and f not in other.dirs():
1533 1533 self._repo.ui.warn('%s: %s\n' %
1534 1534 (self._repo.dirstate.pathto(f), msg))
1535 1535 match.bad = bad
1536 1536 return match
1537 1537
1538 1538 class committablefilectx(basefilectx):
1539 1539 """A committablefilectx provides common functionality for a file context
1540 1540 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1541 1541 def __init__(self, repo, path, filelog=None, ctx=None):
1542 1542 self._repo = repo
1543 1543 self._path = path
1544 1544 self._changeid = None
1545 1545 self._filerev = self._filenode = None
1546 1546
1547 1547 if filelog is not None:
1548 1548 self._filelog = filelog
1549 1549 if ctx:
1550 1550 self._changectx = ctx
1551 1551
1552 1552 def __nonzero__(self):
1553 1553 return True
1554 1554
1555 1555 def parents(self):
1556 1556 '''return parent filectxs, following copies if necessary'''
1557 1557 def filenode(ctx, path):
1558 1558 return ctx._manifest.get(path, nullid)
1559 1559
1560 1560 path = self._path
1561 1561 fl = self._filelog
1562 1562 pcl = self._changectx._parents
1563 1563 renamed = self.renamed()
1564 1564
1565 1565 if renamed:
1566 1566 pl = [renamed + (None,)]
1567 1567 else:
1568 1568 pl = [(path, filenode(pcl[0], path), fl)]
1569 1569
1570 1570 for pc in pcl[1:]:
1571 1571 pl.append((path, filenode(pc, path), fl))
1572 1572
1573 1573 return [filectx(self._repo, p, fileid=n, filelog=l)
1574 1574 for p, n, l in pl if n != nullid]
1575 1575
1576 1576 def children(self):
1577 1577 return []
1578 1578
1579 1579 class workingfilectx(committablefilectx):
1580 1580 """A workingfilectx object makes access to data related to a particular
1581 1581 file in the working directory convenient."""
1582 1582 def __init__(self, repo, path, filelog=None, workingctx=None):
1583 1583 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1584 1584
1585 1585 @propertycache
1586 1586 def _changectx(self):
1587 1587 return workingctx(self._repo)
1588 1588
1589 1589 def data(self):
1590 1590 return self._repo.wread(self._path)
1591 1591 def renamed(self):
1592 1592 rp = self._repo.dirstate.copied(self._path)
1593 1593 if not rp:
1594 1594 return None
1595 1595 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1596 1596
1597 1597 def size(self):
1598 1598 return self._repo.wvfs.lstat(self._path).st_size
1599 1599 def date(self):
1600 1600 t, tz = self._changectx.date()
1601 1601 try:
1602 1602 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1603 1603 except OSError, err:
1604 1604 if err.errno != errno.ENOENT:
1605 1605 raise
1606 1606 return (t, tz)
1607 1607
1608 1608 def cmp(self, fctx):
1609 1609 """compare with other file context
1610 1610
1611 1611 returns True if different than fctx.
1612 1612 """
1613 1613 # fctx should be a filectx (not a workingfilectx)
1614 1614 # invert comparison to reuse the same code path
1615 1615 return fctx.cmp(self)
1616 1616
1617 1617 def remove(self, ignoremissing=False):
1618 1618 """wraps unlink for a repo's working directory"""
1619 1619 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1620 1620
1621 1621 def write(self, data, flags):
1622 1622 """wraps repo.wwrite"""
1623 1623 self._repo.wwrite(self._path, data, flags)
1624 1624
1625 class workingcommitctx(workingctx):
1626 """A workingcommitctx object makes access to data related to
1627 the revision being committed convenient.
1628
1629 This hides changes in the working directory, if they aren't
1630 committed in this context.
1631 """
1632 def __init__(self, repo, changes,
1633 text="", user=None, date=None, extra=None):
1634 super(workingctx, self).__init__(repo, text, user, date, extra,
1635 changes)
1636
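# A minimal usage sketch (mirroring the localrepo.commit() hunk further
# down): 'status' is a scmutil.status carrying exactly the files
# selected for commit, so the context reports only those changes and
# hides the rest of the working directory.
#
#     status = repo.status(match=match)
#     cctx = context.workingcommitctx(repo, status,
#                                     text, user, date, extra)
#     node = repo.commitctx(cctx)
#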
1625 1637 class memctx(committablectx):
1626 1638 """Use memctx to perform in-memory commits via localrepo.commitctx().
1627 1639
1628 1640 Revision information is supplied at initialization time while
1629 1641 related files data are made available through a callback
1630 1642 mechanism. 'repo' is the current localrepo, 'parents' is a
1631 1643 sequence of two parent revisions identifiers (pass None for every
1632 1644 missing parent), 'text' is the commit message and 'files' lists
1633 1645 names of files touched by the revision (normalized and relative to
1634 1646 repository root).
1635 1647
1636 1648 filectxfn(repo, memctx, path) is a callable receiving the
1637 1649 repository, the current memctx object and the normalized path of
1638 1650 requested file, relative to repository root. It is fired by the
1639 1651 commit function for every file in 'files', but the call order is
1640 1652 undefined. If the file is available in the revision being
1641 1653 committed (updated or added), filectxfn returns a memfilectx
1642 1654 object. If the file was removed, filectxfn raises an
1643 1655 IOError. Moved files are represented by marking the source file
1644 1656 removed and the new file added with copy information (see
1645 1657 memfilectx).
1646 1658
1647 1659 user receives the committer name and defaults to current
1648 1660 repository username, date is the commit date in any format
1649 1661 supported by util.parsedate() and defaults to current date, extra
1650 1662 is a dictionary of metadata or is left empty.
1651 1663 """
1652 1664
1653 1665 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1654 1666 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1655 1667 # this field to determine what to do in filectxfn.
1656 1668 _returnnoneformissingfiles = True
1657 1669
1658 1670 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1659 1671 date=None, extra=None, editor=False):
1660 1672 super(memctx, self).__init__(repo, text, user, date, extra)
1661 1673 self._rev = None
1662 1674 self._node = None
1663 1675 parents = [(p or nullid) for p in parents]
1664 1676 p1, p2 = parents
1665 1677 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1666 1678 files = sorted(set(files))
1667 1679 self._files = files
1668 1680 self.substate = {}
1669 1681
1670 1682 # if store is not callable, wrap it in a function
1671 1683 if not callable(filectxfn):
1672 1684 def getfilectx(repo, memctx, path):
1673 1685 fctx = filectxfn[path]
1674 1686 # this is weird but apparently we only keep track of one parent
1675 1687 # (why not only store that instead of a tuple?)
1676 1688 copied = fctx.renamed()
1677 1689 if copied:
1678 1690 copied = copied[0]
1679 1691 return memfilectx(repo, path, fctx.data(),
1680 1692 islink=fctx.islink(), isexec=fctx.isexec(),
1681 1693 copied=copied, memctx=memctx)
1682 1694 self._filectxfn = getfilectx
1683 1695 else:
1684 1696 # "util.cachefunc" reduces invocation of possibly expensive
1685 1697 # "filectxfn" for performance (e.g. converting from another VCS)
1686 1698 self._filectxfn = util.cachefunc(filectxfn)
1687 1699
1688 1700 self._extra = extra and extra.copy() or {}
1689 1701 if self._extra.get('branch', '') == '':
1690 1702 self._extra['branch'] = 'default'
1691 1703
1692 1704 if editor:
1693 1705 self._text = editor(self._repo, self, [])
1694 1706 self._repo.savecommitmessage(self._text)
1695 1707
1696 1708 def filectx(self, path, filelog=None):
1697 1709 """get a file context from the working directory
1698 1710
1699 1711 Returns None if file doesn't exist and should be removed."""
1700 1712 return self._filectxfn(self._repo, self, path)
1701 1713
1702 1714 def commit(self):
1703 1715 """commit context to the repo"""
1704 1716 return self._repo.commitctx(self)
1705 1717
1706 1718 @propertycache
1707 1719 def _manifest(self):
1708 1720 """generate a manifest based on the return values of filectxfn"""
1709 1721
1710 1722 # keep this simple for now; just worry about p1
1711 1723 pctx = self._parents[0]
1712 1724 man = pctx.manifest().copy()
1713 1725
1714 1726 for f in self._status.modified:
1715 1727 p1node = nullid
1716 1728 p2node = nullid
1717 1729 p = pctx[f].parents() # if file isn't in pctx, check p2?
1718 1730 if len(p) > 0:
1719 1731 p1node = p[0].node()
1720 1732 if len(p) > 1:
1721 1733 p2node = p[1].node()
1722 1734 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1723 1735
1724 1736 for f in self._status.added:
1725 1737 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1726 1738
1727 1739 for f in self._status.removed:
1728 1740 if f in man:
1729 1741 del man[f]
1730 1742
1731 1743 return man
1732 1744
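    # For reference, a hedged pure-hashlib sketch of what revlog.hash()
    # computes above: the SHA-1 of the two parent nodes, sorted, followed
    # by the file text (an illustration, not the canonical code):
    #
    #     import hashlib
    #     def nodehash(text, p1node, p2node):
    #         lo, hi = sorted([p1node, p2node])
    #         return hashlib.sha1(lo + hi + text).digest()
    #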
1733 1745 @propertycache
1734 1746 def _status(self):
1735 1747 """Calculate exact status from ``files`` specified at construction
1736 1748 """
1737 1749 man1 = self.p1().manifest()
1738 1750 p2 = self._parents[1]
1739 1751 # "1 < len(self._parents)" can't be used for checking
1740 1752 # existence of the 2nd parent, because "memctx._parents" is
1741 1753 # explicitly initialized with a list whose length is always 2.
1742 1754 if p2.node() != nullid:
1743 1755 man2 = p2.manifest()
1744 1756 managing = lambda f: f in man1 or f in man2
1745 1757 else:
1746 1758 managing = lambda f: f in man1
1747 1759
1748 1760 modified, added, removed = [], [], []
1749 1761 for f in self._files:
1750 1762 if not managing(f):
1751 1763 added.append(f)
1752 1764 elif self[f]:
1753 1765 modified.append(f)
1754 1766 else:
1755 1767 removed.append(f)
1756 1768
1757 1769 return scmutil.status(modified, added, removed, [], [], [], [])
1758 1770
1759 1771 class memfilectx(committablefilectx):
1760 1772 """memfilectx represents an in-memory file to commit.
1761 1773
1762 1774 See memctx and committablefilectx for more details.
1763 1775 """
1764 1776 def __init__(self, repo, path, data, islink=False,
1765 1777 isexec=False, copied=None, memctx=None):
1766 1778 """
1767 1779 path is the normalized file path relative to repository root.
1768 1780 data is the file content as a string.
1769 1781 islink is True if the file is a symbolic link.
1770 1782 isexec is True if the file is executable.
1771 1783 copied is the source file path if current file was copied in the
1772 1784 revision being committed, or None."""
1773 1785 super(memfilectx, self).__init__(repo, path, None, memctx)
1774 1786 self._data = data
1775 1787 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1776 1788 self._copied = None
1777 1789 if copied:
1778 1790 self._copied = (copied, nullid)
1779 1791
1780 1792 def data(self):
1781 1793 return self._data
1782 1794 def size(self):
1783 1795 return len(self.data())
1784 1796 def flags(self):
1785 1797 return self._flags
1786 1798 def renamed(self):
1787 1799 return self._copied
1788 1800
1789 1801 def remove(self, ignoremissing=False):
1790 1802 """wraps unlink for a repo's working directory"""
1791 1803 # need to figure out what to do here
1792 1804 del self._changectx[self._path]
1793 1805
1794 1806 def write(self, data, flags):
1795 1807 """wraps repo.wwrite"""
1796 1808 self._data = data
@@ -1,1839 +1,1840 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 format='HG10', **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG2Y' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.unbundle20(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 cg = exchange.readbundle(self.ui, cg, None)
129 129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 130 if util.safehasattr(ret, 'getchunks'):
131 131 # This is a bundle20 object, turn it into an unbundler.
132 132 # This little dance should be dropped eventually when the API
133 133 # is finally improved.
134 134 stream = util.chunkbuffer(ret.getchunks())
135 135 ret = bundle2.unbundle20(self.ui, stream)
136 136 return ret
137 137 except error.PushRaced, exc:
138 138 raise error.ResponseError(_('push failed:'), str(exc))
139 139
140 140 def lock(self):
141 141 return self._repo.lock()
142 142
143 143 def addchangegroup(self, cg, source, url):
144 144 return changegroup.addchangegroup(self._repo, cg, source, url)
145 145
146 146 def pushkey(self, namespace, key, old, new):
147 147 return self._repo.pushkey(namespace, key, old, new)
148 148
149 149 def listkeys(self, namespace):
150 150 return self._repo.listkeys(namespace)
151 151
152 152 def debugwireargs(self, one, two, three=None, four=None, five=None):
153 153 '''used to test argument passing over the wire'''
154 154 return "%s %s %s %s %s" % (one, two, three, four, five)
155 155
156 156 class locallegacypeer(localpeer):
157 157 '''peer extension which implements legacy methods too; used for tests with
158 158 restricted capabilities'''
159 159
160 160 def __init__(self, repo):
161 161 localpeer.__init__(self, repo, caps=legacycaps)
162 162
163 163 def branches(self, nodes):
164 164 return self._repo.branches(nodes)
165 165
166 166 def between(self, pairs):
167 167 return self._repo.between(pairs)
168 168
169 169 def changegroup(self, basenodes, source):
170 170 return changegroup.changegroup(self._repo, basenodes, source)
171 171
172 172 def changegroupsubset(self, bases, heads, source):
173 173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
174 174
175 175 class localrepository(object):
176 176
177 177 supportedformats = set(('revlogv1', 'generaldelta'))
178 178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
179 179 'dotencode'))
180 180 openerreqs = set(('revlogv1', 'generaldelta'))
181 181 requirements = ['revlogv1']
182 182 filtername = None
183 183
184 184 # a list of (ui, featureset) functions.
185 185 # only functions defined in modules of enabled extensions are invoked
186 186 featuresetupfuncs = set()
187 187
188 188 def _baserequirements(self, create):
189 189 return self.requirements[:]
190 190
191 191 def __init__(self, baseui, path=None, create=False):
192 192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 193 self.wopener = self.wvfs
194 194 self.root = self.wvfs.base
195 195 self.path = self.wvfs.join(".hg")
196 196 self.origroot = path
197 197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 198 self.vfs = scmutil.vfs(self.path)
199 199 self.opener = self.vfs
200 200 self.baseui = baseui
201 201 self.ui = baseui.copy()
202 202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 203 # A list of callbacks to shape the phase if no data were found.
204 204 # Callbacks are in the form: func(repo, roots) --> processed root.
205 205 # This list is to be filled by extensions during repo setup.
206 206 self._phasedefaults = []
207 207 try:
208 208 self.ui.readconfig(self.join("hgrc"), self.root)
209 209 extensions.loadall(self.ui)
210 210 except IOError:
211 211 pass
212 212
213 213 if self.featuresetupfuncs:
214 214 self.supported = set(self._basesupported) # use private copy
215 215 extmods = set(m.__name__ for n, m
216 216 in extensions.extensions(self.ui))
217 217 for setupfunc in self.featuresetupfuncs:
218 218 if setupfunc.__module__ in extmods:
219 219 setupfunc(self.ui, self.supported)
220 220 else:
221 221 self.supported = self._basesupported
222 222
223 223 if not self.vfs.isdir():
224 224 if create:
225 225 if not self.wvfs.exists():
226 226 self.wvfs.makedirs()
227 227 self.vfs.makedir(notindexed=True)
228 228 requirements = self._baserequirements(create)
229 229 if self.ui.configbool('format', 'usestore', True):
230 230 self.vfs.mkdir("store")
231 231 requirements.append("store")
232 232 if self.ui.configbool('format', 'usefncache', True):
233 233 requirements.append("fncache")
234 234 if self.ui.configbool('format', 'dotencode', True):
235 235 requirements.append('dotencode')
236 236 # create an invalid changelog
237 237 self.vfs.append(
238 238 "00changelog.i",
239 239 '\0\0\0\2' # represents revlogv2
240 240 ' dummy changelog to prevent using the old repo layout'
241 241 )
242 242 if self.ui.configbool('format', 'generaldelta', False):
243 243 requirements.append("generaldelta")
244 244 requirements = set(requirements)
245 245 else:
246 246 raise error.RepoError(_("repository %s not found") % path)
247 247 elif create:
248 248 raise error.RepoError(_("repository %s already exists") % path)
249 249 else:
250 250 try:
251 251 requirements = scmutil.readrequires(self.vfs, self.supported)
252 252 except IOError, inst:
253 253 if inst.errno != errno.ENOENT:
254 254 raise
255 255 requirements = set()
256 256
257 257 self.sharedpath = self.path
258 258 try:
259 259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 260 realpath=True)
261 261 s = vfs.base
262 262 if not vfs.exists():
263 263 raise error.RepoError(
264 264 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 265 self.sharedpath = s
266 266 except IOError, inst:
267 267 if inst.errno != errno.ENOENT:
268 268 raise
269 269
270 270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 271 self.spath = self.store.path
272 272 self.svfs = self.store.vfs
273 273 self.sopener = self.svfs
274 274 self.sjoin = self.store.join
275 275 self.vfs.createmode = self.store.createmode
276 276 self._applyrequirements(requirements)
277 277 if create:
278 278 self._writerequirements()
279 279
280 280
281 281 self._branchcaches = {}
282 282 self.filterpats = {}
283 283 self._datafilters = {}
284 284 self._transref = self._lockref = self._wlockref = None
285 285
286 286 # A cache for various files under .hg/ that tracks file changes,
287 287 # (used by the filecache decorator)
288 288 #
289 289 # Maps a property name to its util.filecacheentry
290 290 self._filecache = {}
291 291
292 292 # hold sets of revisions to be filtered
293 293 # should be cleared when something might have changed the filter value:
294 294 # - new changesets,
295 295 # - phase change,
296 296 # - new obsolescence marker,
297 297 # - working directory parent change,
298 298 # - bookmark changes
299 299 self.filteredrevcache = {}
300 300
301 301 # generic mapping between names and nodes
302 302 self.names = namespaces.namespaces()
303 303
304 304 def close(self):
305 305 pass
306 306
307 307 def _restrictcapabilities(self, caps):
308 308 # bundle2 is not ready for prime time, drop it unless explicitly
309 309 # required by the tests (or some brave tester)
310 310 if self.ui.configbool('experimental', 'bundle2-exp', False):
311 311 caps = set(caps)
312 312 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
313 313 caps.add('bundle2-exp=' + urllib.quote(capsblob))
314 314 return caps
315 315
316 316 def _applyrequirements(self, requirements):
317 317 self.requirements = requirements
318 318 self.sopener.options = dict((r, 1) for r in requirements
319 319 if r in self.openerreqs)
320 320 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
321 321 if chunkcachesize is not None:
322 322 self.sopener.options['chunkcachesize'] = chunkcachesize
323 323 maxchainlen = self.ui.configint('format', 'maxchainlen')
324 324 if maxchainlen is not None:
325 325 self.sopener.options['maxchainlen'] = maxchainlen
326 326
327 327 def _writerequirements(self):
328 328 reqfile = self.opener("requires", "w")
329 329 for r in sorted(self.requirements):
330 330 reqfile.write("%s\n" % r)
331 331 reqfile.close()
332 332
333 333 def _checknested(self, path):
334 334 """Determine if path is a legal nested repository."""
335 335 if not path.startswith(self.root):
336 336 return False
337 337 subpath = path[len(self.root) + 1:]
338 338 normsubpath = util.pconvert(subpath)
339 339
340 340 # XXX: Checking against the current working copy is wrong in
341 341 # the sense that it can reject things like
342 342 #
343 343 # $ hg cat -r 10 sub/x.txt
344 344 #
345 345 # if sub/ is no longer a subrepository in the working copy
346 346 # parent revision.
347 347 #
348 348 # However, it can of course also allow things that would have
349 349 # been rejected before, such as the above cat command if sub/
350 350 # is a subrepository now, but was a normal directory before.
351 351 # The old path auditor would have rejected by mistake since it
352 352 # panics when it sees sub/.hg/.
353 353 #
354 354 # All in all, checking against the working copy seems sensible
355 355 # since we want to prevent access to nested repositories on
356 356 # the filesystem *now*.
357 357 ctx = self[None]
358 358 parts = util.splitpath(subpath)
359 359 while parts:
360 360 prefix = '/'.join(parts)
361 361 if prefix in ctx.substate:
362 362 if prefix == normsubpath:
363 363 return True
364 364 else:
365 365 sub = ctx.sub(prefix)
366 366 return sub.checknested(subpath[len(prefix) + 1:])
367 367 else:
368 368 parts.pop()
369 369 return False
370 370
371 371 def peer(self):
372 372 return localpeer(self) # not cached to avoid reference cycle
373 373
374 374 def unfiltered(self):
375 375 """Return unfiltered version of the repository
376 376
377 377 Intended to be overwritten by filtered repo."""
378 378 return self
379 379
380 380 def filtered(self, name):
381 381 """Return a filtered version of a repository"""
382 382 # build a new class with the mixin and the current class
383 383 # (possibly subclass of the repo)
384 384 class proxycls(repoview.repoview, self.unfiltered().__class__):
385 385 pass
386 386 return proxycls(self, name)
387 387
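    # A standalone sketch of the mixin-proxy pattern above (plain Python,
    # not Mercurial API): combining the view mixin with the object's
    # *actual* class keeps any methods added by extensions or subclasses.
    #
    #     class base(object):
    #         def revs(self):
    #             return [0, 1, 2, 3]
    #     class hideodd(object):
    #         def revs(self):
    #             return [r for r in super(hideodd, self).revs()
    #                     if r % 2 == 0]
    #     def view(obj):
    #         class proxycls(hideodd, obj.__class__):
    #             pass
    #         v = proxycls.__new__(proxycls)
    #         v.__dict__.update(obj.__dict__)  # share state with original
    #         return v
    #     # view(base()).revs() == [0, 2]
    #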
388 388 @repofilecache('bookmarks')
389 389 def _bookmarks(self):
390 390 return bookmarks.bmstore(self)
391 391
392 392 @repofilecache('bookmarks.current')
393 393 def _bookmarkcurrent(self):
394 394 return bookmarks.readcurrent(self)
395 395
396 396 def bookmarkheads(self, bookmark):
397 397 name = bookmark.split('@', 1)[0]
398 398 heads = []
399 399 for mark, n in self._bookmarks.iteritems():
400 400 if mark.split('@', 1)[0] == name:
401 401 heads.append(n)
402 402 return heads
403 403
404 404 @storecache('phaseroots')
405 405 def _phasecache(self):
406 406 return phases.phasecache(self, self._phasedefaults)
407 407
408 408 @storecache('obsstore')
409 409 def obsstore(self):
410 410 # read default format for new obsstore.
411 411 defaultformat = self.ui.configint('format', 'obsstore-version', None)
412 412 # rely on obsstore class default when possible.
413 413 kwargs = {}
414 414 if defaultformat is not None:
415 415 kwargs['defaultformat'] = defaultformat
416 416 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
417 417 store = obsolete.obsstore(self.sopener, readonly=readonly,
418 418 **kwargs)
419 419 if store and readonly:
420 420 # message is rare enough to not be translated
421 421 msg = 'obsolete feature not enabled but %i markers found!\n'
422 422 self.ui.warn(msg % len(list(store)))
423 423 return store
424 424
425 425 @storecache('00changelog.i')
426 426 def changelog(self):
427 427 c = changelog.changelog(self.sopener)
428 428 if 'HG_PENDING' in os.environ:
429 429 p = os.environ['HG_PENDING']
430 430 if p.startswith(self.root):
431 431 c.readpending('00changelog.i.a')
432 432 return c
433 433
434 434 @storecache('00manifest.i')
435 435 def manifest(self):
436 436 return manifest.manifest(self.sopener)
437 437
438 438 @repofilecache('dirstate')
439 439 def dirstate(self):
440 440 warned = [0]
441 441 def validate(node):
442 442 try:
443 443 self.changelog.rev(node)
444 444 return node
445 445 except error.LookupError:
446 446 if not warned[0]:
447 447 warned[0] = True
448 448 self.ui.warn(_("warning: ignoring unknown"
449 449 " working parent %s!\n") % short(node))
450 450 return nullid
451 451
452 452 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
453 453
454 454 def __getitem__(self, changeid):
455 455 if changeid is None:
456 456 return context.workingctx(self)
457 457 if isinstance(changeid, slice):
458 458 return [context.changectx(self, i)
459 459 for i in xrange(*changeid.indices(len(self)))
460 460 if i not in self.changelog.filteredrevs]
461 461 return context.changectx(self, changeid)
462 462
463 463 def __contains__(self, changeid):
464 464 try:
465 465 return bool(self.lookup(changeid))
466 466 except error.RepoLookupError:
467 467 return False
468 468
469 469 def __nonzero__(self):
470 470 return True
471 471
472 472 def __len__(self):
473 473 return len(self.changelog)
474 474
475 475 def __iter__(self):
476 476 return iter(self.changelog)
477 477
478 478 def revs(self, expr, *args):
479 479 '''Return a list of revisions matching the given revset'''
480 480 expr = revset.formatspec(expr, *args)
481 481 m = revset.match(None, expr)
482 482 return m(self, revset.spanset(self))
483 483
484 484 def set(self, expr, *args):
485 485 '''
486 486 Yield a context for each matching revision, after doing arg
487 487 replacement via revset.formatspec
488 488 '''
489 489 for r in self.revs(expr, *args):
490 490 yield self[r]
491 491
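    # A hedged usage example (assuming an open 'repo', e.g. in an
    # extension): revset.formatspec() quotes the %s/%d arguments, so
    # callers need not escape user-supplied values themselves.
    #
    #     def defaultheads(repo):
    #         '''changectxs for the heads of the default branch'''
    #         return [repo[r]
    #                 for r in repo.revs('heads(branch(%s))', 'default')]
    #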
492 492 def url(self):
493 493 return 'file:' + self.root
494 494
495 495 def hook(self, name, throw=False, **args):
496 496 """Call a hook, passing this repo instance.
497 497
498 498 This is a convenience method to aid invoking hooks. Extensions likely
499 499 won't call this unless they have registered a custom hook or are
500 500 replacing code that is expected to call a hook.
501 501 """
502 502 return hook.hook(self.ui, self, name, throw, **args)
503 503
504 504 @unfilteredmethod
505 505 def _tag(self, names, node, message, local, user, date, extra={},
506 506 editor=False):
507 507 if isinstance(names, str):
508 508 names = (names,)
509 509
510 510 branches = self.branchmap()
511 511 for name in names:
512 512 self.hook('pretag', throw=True, node=hex(node), tag=name,
513 513 local=local)
514 514 if name in branches:
515 515 self.ui.warn(_("warning: tag %s conflicts with existing"
516 516 " branch name\n") % name)
517 517
518 518 def writetags(fp, names, munge, prevtags):
519 519 fp.seek(0, 2)
520 520 if prevtags and prevtags[-1] != '\n':
521 521 fp.write('\n')
522 522 for name in names:
523 523 m = munge and munge(name) or name
524 524 if (self._tagscache.tagtypes and
525 525 name in self._tagscache.tagtypes):
526 526 old = self.tags().get(name, nullid)
527 527 fp.write('%s %s\n' % (hex(old), m))
528 528 fp.write('%s %s\n' % (hex(node), m))
529 529 fp.close()
530 530
531 531 prevtags = ''
532 532 if local:
533 533 try:
534 534 fp = self.opener('localtags', 'r+')
535 535 except IOError:
536 536 fp = self.opener('localtags', 'a')
537 537 else:
538 538 prevtags = fp.read()
539 539
540 540 # local tags are stored in the current charset
541 541 writetags(fp, names, None, prevtags)
542 542 for name in names:
543 543 self.hook('tag', node=hex(node), tag=name, local=local)
544 544 return
545 545
546 546 try:
547 547 fp = self.wfile('.hgtags', 'rb+')
548 548 except IOError, e:
549 549 if e.errno != errno.ENOENT:
550 550 raise
551 551 fp = self.wfile('.hgtags', 'ab')
552 552 else:
553 553 prevtags = fp.read()
554 554
555 555 # committed tags are stored in UTF-8
556 556 writetags(fp, names, encoding.fromlocal, prevtags)
557 557
558 558 fp.close()
559 559
560 560 self.invalidatecaches()
561 561
562 562 if '.hgtags' not in self.dirstate:
563 563 self[None].add(['.hgtags'])
564 564
565 565 m = matchmod.exact(self.root, '', ['.hgtags'])
566 566 tagnode = self.commit(message, user, date, extra=extra, match=m,
567 567 editor=editor)
568 568
569 569 for name in names:
570 570 self.hook('tag', node=hex(node), tag=name, local=local)
571 571
572 572 return tagnode
573 573
574 574 def tag(self, names, node, message, local, user, date, editor=False):
575 575 '''tag a revision with one or more symbolic names.
576 576
577 577 names is a list of strings or, when adding a single tag, names may be a
578 578 string.
579 579
580 580 if local is True, the tags are stored in a per-repository file.
581 581 otherwise, they are stored in the .hgtags file, and a new
582 582 changeset is committed with the change.
583 583
584 584 keyword arguments:
585 585
586 586 local: whether to store tags in non-version-controlled file
587 587 (default False)
588 588
589 589 message: commit message to use if committing
590 590
591 591 user: name of user to use if committing
592 592
593 593 date: date tuple to use if committing'''
594 594
595 595 if not local:
596 596 m = matchmod.exact(self.root, '', ['.hgtags'])
597 597 if util.any(self.status(match=m, unknown=True, ignored=True)):
598 598 raise util.Abort(_('working copy of .hgtags is changed'),
599 599 hint=_('please commit .hgtags manually'))
600 600
601 601 self.tags() # instantiate the cache
602 602 self._tag(names, node, message, local, user, date, editor=editor)
603 603
604 604 @filteredpropertycache
605 605 def _tagscache(self):
606 606 '''Returns a tagscache object that contains various tags related
607 607 caches.'''
608 608
609 609 # This simplifies its cache management by having one decorated
610 610 # function (this one) and the rest simply fetch things from it.
611 611 class tagscache(object):
612 612 def __init__(self):
613 613 # These two define the set of tags for this repository. tags
614 614 # maps tag name to node; tagtypes maps tag name to 'global' or
615 615 # 'local'. (Global tags are defined by .hgtags across all
616 616 # heads, and local tags are defined in .hg/localtags.)
617 617 # They constitute the in-memory cache of tags.
618 618 self.tags = self.tagtypes = None
619 619
620 620 self.nodetagscache = self.tagslist = None
621 621
622 622 cache = tagscache()
623 623 cache.tags, cache.tagtypes = self._findtags()
624 624
625 625 return cache
626 626
627 627 def tags(self):
628 628 '''return a mapping of tag to node'''
629 629 t = {}
630 630 if self.changelog.filteredrevs:
631 631 tags, tt = self._findtags()
632 632 else:
633 633 tags = self._tagscache.tags
634 634 for k, v in tags.iteritems():
635 635 try:
636 636 # ignore tags to unknown nodes
637 637 self.changelog.rev(v)
638 638 t[k] = v
639 639 except (error.LookupError, ValueError):
640 640 pass
641 641 return t
642 642
643 643 def _findtags(self):
644 644 '''Do the hard work of finding tags. Return a pair of dicts
645 645 (tags, tagtypes) where tags maps tag name to node, and tagtypes
646 646 maps tag name to a string like \'global\' or \'local\'.
647 647 Subclasses or extensions are free to add their own tags, but
648 648 should be aware that the returned dicts will be retained for the
649 649 duration of the localrepo object.'''
650 650
651 651 # XXX what tagtype should subclasses/extensions use? Currently
652 652 # mq and bookmarks add tags, but do not set the tagtype at all.
653 653 # Should each extension invent its own tag type? Should there
654 654 # be one tagtype for all such "virtual" tags? Or is the status
655 655 # quo fine?
656 656
657 657 alltags = {} # map tag name to (node, hist)
658 658 tagtypes = {}
659 659
660 660 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
661 661 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
662 662
663 663 # Build the return dicts. Have to re-encode tag names because
664 664 # the tags module always uses UTF-8 (in order not to lose info
665 665 # writing to the cache), but the rest of Mercurial wants them in
666 666 # local encoding.
667 667 tags = {}
668 668 for (name, (node, hist)) in alltags.iteritems():
669 669 if node != nullid:
670 670 tags[encoding.tolocal(name)] = node
671 671 tags['tip'] = self.changelog.tip()
672 672 tagtypes = dict([(encoding.tolocal(name), value)
673 673 for (name, value) in tagtypes.iteritems()])
674 674 return (tags, tagtypes)
675 675
676 676 def tagtype(self, tagname):
677 677 '''
678 678 return the type of the given tag. result can be:
679 679
680 680 'local' : a local tag
681 681 'global' : a global tag
682 682 None : tag does not exist
683 683 '''
684 684
685 685 return self._tagscache.tagtypes.get(tagname)
686 686
687 687 def tagslist(self):
688 688 '''return a list of tags ordered by revision'''
689 689 if not self._tagscache.tagslist:
690 690 l = []
691 691 for t, n in self.tags().iteritems():
692 692 l.append((self.changelog.rev(n), t, n))
693 693 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
694 694
695 695 return self._tagscache.tagslist
696 696
697 697 def nodetags(self, node):
698 698 '''return the tags associated with a node'''
699 699 if not self._tagscache.nodetagscache:
700 700 nodetagscache = {}
701 701 for t, n in self._tagscache.tags.iteritems():
702 702 nodetagscache.setdefault(n, []).append(t)
703 703 for tags in nodetagscache.itervalues():
704 704 tags.sort()
705 705 self._tagscache.nodetagscache = nodetagscache
706 706 return self._tagscache.nodetagscache.get(node, [])
707 707
708 708 def nodebookmarks(self, node):
709 709 marks = []
710 710 for bookmark, n in self._bookmarks.iteritems():
711 711 if n == node:
712 712 marks.append(bookmark)
713 713 return sorted(marks)
714 714
715 715 def branchmap(self):
716 716 '''returns a dictionary {branch: [branchheads]} with branchheads
717 717 ordered by increasing revision number'''
718 718 branchmap.updatecache(self)
719 719 return self._branchcaches[self.filtername]
720 720
721 721 def branchtip(self, branch):
722 722 '''return the tip node for a given branch'''
723 723 try:
724 724 return self.branchmap().branchtip(branch)
725 725 except KeyError:
726 726 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
727 727
728 728 def lookup(self, key):
729 729 return self[key].node()
730 730
731 731 def lookupbranch(self, key, remote=None):
732 732 repo = remote or self
733 733 if key in repo.branchmap():
734 734 return key
735 735
736 736 repo = (remote and remote.local()) and remote or self
737 737 return repo[key].branch()
738 738
739 739 def known(self, nodes):
740 740 nm = self.changelog.nodemap
741 741 pc = self._phasecache
742 742 result = []
743 743 for n in nodes:
744 744 r = nm.get(n)
745 745 resp = not (r is None or pc.phase(self, r) >= phases.secret)
746 746 result.append(resp)
747 747 return result
748 748
749 749 def local(self):
750 750 return self
751 751
752 752 def cancopy(self):
753 753 # so statichttprepo's override of local() works
754 754 if not self.local():
755 755 return False
756 756 if not self.ui.configbool('phases', 'publish', True):
757 757 return True
758 758 # if publishing, we can't copy if there is filtered content
759 759 return not self.filtered('visible').changelog.filteredrevs
760 760
761 761 def shared(self):
762 762 '''the type of shared repository (None if not shared)'''
763 763 if self.sharedpath != self.path:
764 764 return 'store'
765 765 return None
766 766
767 767 def join(self, f, *insidef):
768 768 return self.vfs.join(os.path.join(f, *insidef))
769 769
770 770 def wjoin(self, f, *insidef):
771 771 return os.path.join(self.root, f, *insidef)
772 772
773 773 def file(self, f):
774 774 if f[0] == '/':
775 775 f = f[1:]
776 776 return filelog.filelog(self.sopener, f)
777 777
778 778 def changectx(self, changeid):
779 779 return self[changeid]
780 780
781 781 def parents(self, changeid=None):
782 782 '''get list of changectxs for parents of changeid'''
783 783 return self[changeid].parents()
784 784
785 785 def setparents(self, p1, p2=nullid):
786 786 self.dirstate.beginparentchange()
787 787 copies = self.dirstate.setparents(p1, p2)
788 788 pctx = self[p1]
789 789 if copies:
790 790 # Adjust copy records: the dirstate cannot do it, as it
791 791 # requires access to the parents' manifests. Preserve them
792 792 # only for entries added to the first parent.
793 793 for f in copies:
794 794 if f not in pctx and copies[f] in pctx:
795 795 self.dirstate.copy(copies[f], f)
796 796 if p2 == nullid:
797 797 for f, s in sorted(self.dirstate.copies().items()):
798 798 if f not in pctx and s not in pctx:
799 799 self.dirstate.copy(None, f)
800 800 self.dirstate.endparentchange()
801 801
802 802 def filectx(self, path, changeid=None, fileid=None):
803 803 """changeid can be a changeset revision, node, or tag.
804 804 fileid can be a file revision or node."""
805 805 return context.filectx(self, path, changeid, fileid)
806 806
807 807 def getcwd(self):
808 808 return self.dirstate.getcwd()
809 809
810 810 def pathto(self, f, cwd=None):
811 811 return self.dirstate.pathto(f, cwd)
812 812
813 813 def wfile(self, f, mode='r'):
814 814 return self.wopener(f, mode)
815 815
816 816 def _link(self, f):
817 817 return self.wvfs.islink(f)
818 818
819 819 def _loadfilter(self, filter):
820 820 if filter not in self.filterpats:
821 821 l = []
822 822 for pat, cmd in self.ui.configitems(filter):
823 823 if cmd == '!':
824 824 continue
825 825 mf = matchmod.match(self.root, '', [pat])
826 826 fn = None
827 827 params = cmd
828 828 for name, filterfn in self._datafilters.iteritems():
829 829 if cmd.startswith(name):
830 830 fn = filterfn
831 831 params = cmd[len(name):].lstrip()
832 832 break
833 833 if not fn:
834 834 fn = lambda s, c, **kwargs: util.filter(s, c)
835 835 # Wrap old filters not supporting keyword arguments
836 836 if not inspect.getargspec(fn)[2]:
837 837 oldfn = fn
838 838 fn = lambda s, c, **kwargs: oldfn(s, c)
839 839 l.append((mf, fn, params))
840 840 self.filterpats[filter] = l
841 841 return self.filterpats[filter]
842 842
843 843 def _filter(self, filterpats, filename, data):
844 844 for mf, fn, cmd in filterpats:
845 845 if mf(filename):
846 846 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
847 847 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
848 848 break
849 849
850 850 return data
851 851
852 852 @unfilteredpropertycache
853 853 def _encodefilterpats(self):
854 854 return self._loadfilter('encode')
855 855
856 856 @unfilteredpropertycache
857 857 def _decodefilterpats(self):
858 858 return self._loadfilter('decode')
859 859
860 860 def adddatafilter(self, name, filter):
861 861 self._datafilters[name] = filter
862 862
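    # A hedged registration sketch (the win32text extension works this
    # way): _loadfilter() above matches the configured command by prefix,
    # so a filter named 'mycrlf:' (illustrative, not shipped) handles an
    # '[encode]' entry like '**.txt = mycrlf:', and anything after the
    # name is passed to the filter as its parameters.
    #
    #     def crlftolf(s, cmd, **kwargs):
    #         return s.replace('\r\n', '\n')
    #     repo.adddatafilter('mycrlf:', crlftolf)
    #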
863 863 def wread(self, filename):
864 864 if self._link(filename):
865 865 data = self.wvfs.readlink(filename)
866 866 else:
867 867 data = self.wopener.read(filename)
868 868 return self._filter(self._encodefilterpats, filename, data)
869 869
870 870 def wwrite(self, filename, data, flags):
871 871 data = self._filter(self._decodefilterpats, filename, data)
872 872 if 'l' in flags:
873 873 self.wopener.symlink(data, filename)
874 874 else:
875 875 self.wopener.write(filename, data)
876 876 if 'x' in flags:
877 877 self.wvfs.setflags(filename, False, True)
878 878
879 879 def wwritedata(self, filename, data):
880 880 return self._filter(self._decodefilterpats, filename, data)
881 881
882 882 def currenttransaction(self):
883 883 """return the current transaction or None if non exists"""
884 884 tr = self._transref and self._transref() or None
885 885 if tr and tr.running():
886 886 return tr
887 887 return None
888 888
889 889 def transaction(self, desc, report=None):
890 890 tr = self.currenttransaction()
891 891 if tr is not None:
892 892 return tr.nest()
893 893
894 894 # abort here if the journal already exists
895 895 if self.svfs.exists("journal"):
896 896 raise error.RepoError(
897 897 _("abandoned transaction found"),
898 898 hint=_("run 'hg recover' to clean up transaction"))
899 899
900 900 self._writejournal(desc)
901 901 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
902 902 rp = report and report or self.ui.warn
903 903 vfsmap = {'plain': self.opener} # root of .hg/
904 904 tr = transaction.transaction(rp, self.sopener, vfsmap,
905 905 "journal",
906 906 aftertrans(renames),
907 907 self.store.createmode)
908 908 # note: writing the fncache only during finalize means that the file is
909 909 # outdated when running hooks. As fncache is used for streaming clones,
910 910 # this is not expected to break anything that happens during the hooks.
911 911 tr.addfinalize('flush-fncache', self.store.write)
912 912 self._transref = weakref.ref(tr)
913 913 return tr
914 914
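    # Hedged usage sketch: transactions nest, so callers need not know
    # whether an outer transaction is already running. The usual pattern
    # is close-on-success, release-always:
    #
    #     tr = repo.transaction('my-operation')
    #     try:
    #         ...             # write to the store
    #         tr.close()      # commit the journal
    #     finally:
    #         tr.release()    # roll back if close() was never reached
    #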
915 915 def _journalfiles(self):
916 916 return ((self.svfs, 'journal'),
917 917 (self.vfs, 'journal.dirstate'),
918 918 (self.vfs, 'journal.branch'),
919 919 (self.vfs, 'journal.desc'),
920 920 (self.vfs, 'journal.bookmarks'),
921 921 (self.svfs, 'journal.phaseroots'))
922 922
923 923 def undofiles(self):
924 924 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
925 925
926 926 def _writejournal(self, desc):
927 927 self.opener.write("journal.dirstate",
928 928 self.opener.tryread("dirstate"))
929 929 self.opener.write("journal.branch",
930 930 encoding.fromlocal(self.dirstate.branch()))
931 931 self.opener.write("journal.desc",
932 932 "%d\n%s\n" % (len(self), desc))
933 933 self.opener.write("journal.bookmarks",
934 934 self.opener.tryread("bookmarks"))
935 935 self.sopener.write("journal.phaseroots",
936 936 self.sopener.tryread("phaseroots"))
937 937
938 938 def recover(self):
939 939 lock = self.lock()
940 940 try:
941 941 if self.svfs.exists("journal"):
942 942 self.ui.status(_("rolling back interrupted transaction\n"))
943 943 vfsmap = {'': self.sopener,
944 944 'plain': self.opener,}
945 945 transaction.rollback(self.sopener, vfsmap, "journal",
946 946 self.ui.warn)
947 947 self.invalidate()
948 948 return True
949 949 else:
950 950 self.ui.warn(_("no interrupted transaction available\n"))
951 951 return False
952 952 finally:
953 953 lock.release()
954 954
955 955 def rollback(self, dryrun=False, force=False):
956 956 wlock = lock = None
957 957 try:
958 958 wlock = self.wlock()
959 959 lock = self.lock()
960 960 if self.svfs.exists("undo"):
961 961 return self._rollback(dryrun, force)
962 962 else:
963 963 self.ui.warn(_("no rollback information available\n"))
964 964 return 1
965 965 finally:
966 966 release(lock, wlock)
967 967
968 968 @unfilteredmethod # Until we get smarter cache management
969 969 def _rollback(self, dryrun, force):
970 970 ui = self.ui
971 971 try:
972 972 args = self.opener.read('undo.desc').splitlines()
973 973 (oldlen, desc, detail) = (int(args[0]), args[1], None)
974 974 if len(args) >= 3:
975 975 detail = args[2]
976 976 oldtip = oldlen - 1
977 977
978 978 if detail and ui.verbose:
979 979 msg = (_('repository tip rolled back to revision %s'
980 980 ' (undo %s: %s)\n')
981 981 % (oldtip, desc, detail))
982 982 else:
983 983 msg = (_('repository tip rolled back to revision %s'
984 984 ' (undo %s)\n')
985 985 % (oldtip, desc))
986 986 except IOError:
987 987 msg = _('rolling back unknown transaction\n')
988 988 desc = None
989 989
990 990 if not force and self['.'] != self['tip'] and desc == 'commit':
991 991 raise util.Abort(
992 992 _('rollback of last commit while not checked out '
993 993 'may lose data'), hint=_('use -f to force'))
994 994
995 995 ui.status(msg)
996 996 if dryrun:
997 997 return 0
998 998
999 999 parents = self.dirstate.parents()
1000 1000 self.destroying()
1001 1001 vfsmap = {'plain': self.opener}
1002 1002 transaction.rollback(self.sopener, vfsmap, 'undo', ui.warn)
1003 1003 if self.vfs.exists('undo.bookmarks'):
1004 1004 self.vfs.rename('undo.bookmarks', 'bookmarks')
1005 1005 if self.svfs.exists('undo.phaseroots'):
1006 1006 self.svfs.rename('undo.phaseroots', 'phaseroots')
1007 1007 self.invalidate()
1008 1008
1009 1009 parentgone = (parents[0] not in self.changelog.nodemap or
1010 1010 parents[1] not in self.changelog.nodemap)
1011 1011 if parentgone:
1012 1012 self.vfs.rename('undo.dirstate', 'dirstate')
1013 1013 try:
1014 1014 branch = self.opener.read('undo.branch')
1015 1015 self.dirstate.setbranch(encoding.tolocal(branch))
1016 1016 except IOError:
1017 1017 ui.warn(_('named branch could not be reset: '
1018 1018 'current branch is still \'%s\'\n')
1019 1019 % self.dirstate.branch())
1020 1020
1021 1021 self.dirstate.invalidate()
1022 1022 parents = tuple([p.rev() for p in self.parents()])
1023 1023 if len(parents) > 1:
1024 1024 ui.status(_('working directory now based on '
1025 1025 'revisions %d and %d\n') % parents)
1026 1026 else:
1027 1027 ui.status(_('working directory now based on '
1028 1028 'revision %d\n') % parents)
1029 1029 # TODO: if we know which new heads may result from this rollback, pass
1030 1030 # them to destroy(), which will prevent the branchhead cache from being
1031 1031 # invalidated.
1032 1032 self.destroyed()
1033 1033 return 0
1034 1034
1035 1035 def invalidatecaches(self):
1036 1036
1037 1037 if '_tagscache' in vars(self):
1038 1038 # can't use delattr on proxy
1039 1039 del self.__dict__['_tagscache']
1040 1040
1041 1041 self.unfiltered()._branchcaches.clear()
1042 1042 self.invalidatevolatilesets()
1043 1043
1044 1044 def invalidatevolatilesets(self):
1045 1045 self.filteredrevcache.clear()
1046 1046 obsolete.clearobscaches(self)
1047 1047
1048 1048 def invalidatedirstate(self):
1049 1049 '''Invalidates the dirstate, causing the next call to dirstate
1050 1050 to check if it was modified since the last time it was read,
1051 1051 rereading it if it has.
1052 1052
1053 1053 This is different from dirstate.invalidate() in that it doesn't always
1054 1054 reread the dirstate. Use dirstate.invalidate() if you want to
1055 1055 explicitly read the dirstate again (i.e. restoring it to a previous
1056 1056 known good state).'''
1057 1057 if hasunfilteredcache(self, 'dirstate'):
1058 1058 for k in self.dirstate._filecache:
1059 1059 try:
1060 1060 delattr(self.dirstate, k)
1061 1061 except AttributeError:
1062 1062 pass
1063 1063 delattr(self.unfiltered(), 'dirstate')
1064 1064
1065 1065 def invalidate(self):
1066 1066 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1067 1067 for k in self._filecache:
1068 1068 # dirstate is invalidated separately in invalidatedirstate()
1069 1069 if k == 'dirstate':
1070 1070 continue
1071 1071
1072 1072 try:
1073 1073 delattr(unfiltered, k)
1074 1074 except AttributeError:
1075 1075 pass
1076 1076 self.invalidatecaches()
1077 1077 self.store.invalidatecaches()
1078 1078
1079 1079 def invalidateall(self):
1080 1080 '''Fully invalidates both store and non-store parts, causing the
1081 1081 subsequent operation to reread any outside changes.'''
1082 1082 # extension should hook this to invalidate its caches
1083 1083 self.invalidate()
1084 1084 self.invalidatedirstate()
1085 1085
1086 1086 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1087 1087 try:
1088 1088 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1089 1089 except error.LockHeld, inst:
1090 1090 if not wait:
1091 1091 raise
1092 1092 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1093 1093 (desc, inst.locker))
1094 1094 # default to 600 seconds timeout
1095 1095 l = lockmod.lock(vfs, lockname,
1096 1096 int(self.ui.config("ui", "timeout", "600")),
1097 1097 releasefn, desc=desc)
1098 1098 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1099 1099 if acquirefn:
1100 1100 acquirefn()
1101 1101 return l
1102 1102
1103 1103 def _afterlock(self, callback):
1104 1104 """add a callback to the current repository lock.
1105 1105
1106 1106 The callback will be executed on lock release."""
1107 1107 l = self._lockref and self._lockref()
1108 1108 if l:
1109 1109 l.postrelease.append(callback)
1110 1110 else:
1111 1111 callback()
1112 1112
1113 1113 def lock(self, wait=True):
1114 1114 '''Lock the repository store (.hg/store) and return a weak reference
1115 1115 to the lock. Use this before modifying the store (e.g. committing or
1116 1116 stripping). If you are opening a transaction, get a lock as well.'''
1117 1117 l = self._lockref and self._lockref()
1118 1118 if l is not None and l.held:
1119 1119 l.lock()
1120 1120 return l
1121 1121
1122 1122 def unlock():
1123 1123 for k, ce in self._filecache.items():
1124 1124 if k == 'dirstate' or k not in self.__dict__:
1125 1125 continue
1126 1126 ce.refresh()
1127 1127
1128 1128 l = self._lock(self.svfs, "lock", wait, unlock,
1129 1129 self.invalidate, _('repository %s') % self.origroot)
1130 1130 self._lockref = weakref.ref(l)
1131 1131 return l
1132 1132
1133 1133 def wlock(self, wait=True):
1134 1134 '''Lock the non-store parts of the repository (everything under
1135 1135 .hg except .hg/store) and return a weak reference to the lock.
1136 1136 Use this before modifying files in .hg.'''
1137 1137 l = self._wlockref and self._wlockref()
1138 1138 if l is not None and l.held:
1139 1139 l.lock()
1140 1140 return l
1141 1141
1142 1142 def unlock():
1143 1143 if self.dirstate.pendingparentchange():
1144 1144 self.dirstate.invalidate()
1145 1145 else:
1146 1146 self.dirstate.write()
1147 1147
1148 1148 self._filecache['dirstate'].refresh()
1149 1149
1150 1150 l = self._lock(self.vfs, "wlock", wait, unlock,
1151 1151 self.invalidatedirstate, _('working directory of %s') %
1152 1152 self.origroot)
1153 1153 self._wlockref = weakref.ref(l)
1154 1154 return l
1155 1155
1156 1156 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1157 1157 """
1158 1158 commit an individual file as part of a larger transaction
1159 1159 """
1160 1160
1161 1161 fname = fctx.path()
1162 1162 text = fctx.data()
1163 1163 flog = self.file(fname)
1164 1164 fparent1 = manifest1.get(fname, nullid)
1165 1165 fparent2 = manifest2.get(fname, nullid)
1166 1166
1167 1167 meta = {}
1168 1168 copy = fctx.renamed()
1169 1169 if copy and copy[0] != fname:
1170 1170 # Mark the new revision of this file as a copy of another
1171 1171 # file. This copy data will effectively act as a parent
1172 1172 # of this new revision. If this is a merge, the first
1173 1173 # parent will be the nullid (meaning "look up the copy data")
1174 1174 # and the second one will be the other parent. For example:
1175 1175 #
1176 1176 # 0 --- 1 --- 3 rev1 changes file foo
1177 1177 # \ / rev2 renames foo to bar and changes it
1178 1178 # \- 2 -/ rev3 should have bar with all changes and
1179 1179 # should record that bar descends from
1180 1180 # bar in rev2 and foo in rev1
1181 1181 #
1182 1182 # this allows this merge to succeed:
1183 1183 #
1184 1184 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1185 1185 # \ / merging rev3 and rev4 should use bar@rev2
1186 1186 # \- 2 --- 4 as the merge base
1187 1187 #
1188 1188
1189 1189 cfname = copy[0]
1190 1190 crev = manifest1.get(cfname)
1191 1191 newfparent = fparent2
1192 1192
1193 1193 if manifest2: # branch merge
1194 1194 if fparent2 == nullid or crev is None: # copied on remote side
1195 1195 if cfname in manifest2:
1196 1196 crev = manifest2[cfname]
1197 1197 newfparent = fparent1
1198 1198
1199 1199 # find source in nearest ancestor if we've lost track
1200 1200 if not crev:
1201 1201 self.ui.debug(" %s: searching for copy revision for %s\n" %
1202 1202 (fname, cfname))
1203 1203 for ancestor in self[None].ancestors():
1204 1204 if cfname in ancestor:
1205 1205 crev = ancestor[cfname].filenode()
1206 1206 break
1207 1207
1208 1208 if crev:
1209 1209 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1210 1210 meta["copy"] = cfname
1211 1211 meta["copyrev"] = hex(crev)
1212 1212 fparent1, fparent2 = nullid, newfparent
1213 1213 else:
1214 1214 self.ui.warn(_("warning: can't find ancestor for '%s' "
1215 1215 "copied from '%s'!\n") % (fname, cfname))
1216 1216
1217 1217 elif fparent1 == nullid:
1218 1218 fparent1, fparent2 = fparent2, nullid
1219 1219 elif fparent2 != nullid:
1220 1220 # is one parent an ancestor of the other?
1221 1221 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1222 1222 if fparent1 in fparentancestors:
1223 1223 fparent1, fparent2 = fparent2, nullid
1224 1224 elif fparent2 in fparentancestors:
1225 1225 fparent2 = nullid
1226 1226
1227 1227 # is the file changed?
1228 1228 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1229 1229 changelist.append(fname)
1230 1230 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1231 1231 # are just the flags changed during merge?
1232 1232 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1233 1233 changelist.append(fname)
1234 1234
1235 1235 return fparent1
1236 1236
1237 1237 @unfilteredmethod
1238 1238 def commit(self, text="", user=None, date=None, match=None, force=False,
1239 1239 editor=False, extra={}):
1240 1240 """Add a new revision to current repository.
1241 1241
1242 1242 Revision information is gathered from the working directory,
1243 1243 match can be used to filter the committed files. If editor is
1244 1244 supplied, it is called to get a commit message.
1245 1245 """
1246 1246
1247 1247 def fail(f, msg):
1248 1248 raise util.Abort('%s: %s' % (f, msg))
1249 1249
1250 1250 if not match:
1251 1251 match = matchmod.always(self.root, '')
1252 1252
1253 1253 if not force:
1254 1254 vdirs = []
1255 1255 match.explicitdir = vdirs.append
1256 1256 match.bad = fail
1257 1257
1258 1258 wlock = self.wlock()
1259 1259 try:
1260 1260 wctx = self[None]
1261 1261 merge = len(wctx.parents()) > 1
1262 1262
1263 1263 if (not force and merge and match and
1264 1264 (match.files() or match.anypats())):
1265 1265 raise util.Abort(_('cannot partially commit a merge '
1266 1266 '(do not specify files or patterns)'))
1267 1267
1268 1268 status = self.status(match=match, clean=force)
1269 1269 if force:
1270 1270 status.modified.extend(status.clean) # mq may commit clean files
1271 1271
1272 1272 # check subrepos
1273 1273 subs = []
1274 1274 commitsubs = set()
1275 1275 newstate = wctx.substate.copy()
1276 1276 # only manage subrepos and .hgsubstate if .hgsub is present
1277 1277 if '.hgsub' in wctx:
1278 1278 # we'll decide whether to track this ourselves, thanks
1279 1279 for c in status.modified, status.added, status.removed:
1280 1280 if '.hgsubstate' in c:
1281 1281 c.remove('.hgsubstate')
1282 1282
1283 1283 # compare current state to last committed state
1284 1284 # build new substate based on last committed state
1285 1285 oldstate = wctx.p1().substate
1286 1286 for s in sorted(newstate.keys()):
1287 1287 if not match(s):
1288 1288 # ignore working copy, use old state if present
1289 1289 if s in oldstate:
1290 1290 newstate[s] = oldstate[s]
1291 1291 continue
1292 1292 if not force:
1293 1293 raise util.Abort(
1294 1294 _("commit with new subrepo %s excluded") % s)
1295 1295 if wctx.sub(s).dirty(True):
1296 1296 if not self.ui.configbool('ui', 'commitsubrepos'):
1297 1297 raise util.Abort(
1298 1298 _("uncommitted changes in subrepo %s") % s,
1299 1299 hint=_("use --subrepos for recursive commit"))
1300 1300 subs.append(s)
1301 1301 commitsubs.add(s)
1302 1302 else:
1303 1303 bs = wctx.sub(s).basestate()
1304 1304 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1305 1305 if oldstate.get(s, (None, None, None))[1] != bs:
1306 1306 subs.append(s)
1307 1307
1308 1308 # check for removed subrepos
1309 1309 for p in wctx.parents():
1310 1310 r = [s for s in p.substate if s not in newstate]
1311 1311 subs += [s for s in r if match(s)]
1312 1312 if subs:
1313 1313 if (not match('.hgsub') and
1314 1314 '.hgsub' in (wctx.modified() + wctx.added())):
1315 1315 raise util.Abort(
1316 1316 _("can't commit subrepos without .hgsub"))
1317 1317 status.modified.insert(0, '.hgsubstate')
1318 1318
1319 1319 elif '.hgsub' in status.removed:
1320 1320 # clean up .hgsubstate when .hgsub is removed
1321 1321 if ('.hgsubstate' in wctx and
1322 1322 '.hgsubstate' not in (status.modified + status.added +
1323 1323 status.removed)):
1324 1324 status.removed.insert(0, '.hgsubstate')
1325 1325
1326 1326 # make sure all explicit patterns are matched
1327 1327 if not force and match.files():
1328 1328 matched = set(status.modified + status.added + status.removed)
1329 1329
1330 1330 for f in match.files():
1331 1331 f = self.dirstate.normalize(f)
1332 1332 if f == '.' or f in matched or f in wctx.substate:
1333 1333 continue
1334 1334 if f in status.deleted:
1335 1335 fail(f, _('file not found!'))
1336 1336 if f in vdirs: # visited directory
1337 1337 d = f + '/'
1338 1338 for mf in matched:
1339 1339 if mf.startswith(d):
1340 1340 break
1341 1341 else:
1342 1342 fail(f, _("no match under directory!"))
1343 1343 elif f not in self.dirstate:
1344 1344 fail(f, _("file not tracked!"))
1345 1345
1346 cctx = context.workingctx(self, text, user, date, extra, status)
1346 cctx = context.workingcommitctx(self, status,
1347 text, user, date, extra)
1347 1348
1348 1349 if (not force and not extra.get("close") and not merge
1349 1350 and not cctx.files()
1350 1351 and wctx.branch() == wctx.p1().branch()):
1351 1352 return None
1352 1353
1353 1354 if merge and cctx.deleted():
1354 1355 raise util.Abort(_("cannot commit merge with missing files"))
1355 1356
1356 1357 ms = mergemod.mergestate(self)
1357 1358 for f in status.modified:
1358 1359 if f in ms and ms[f] == 'u':
1359 1360 raise util.Abort(_("unresolved merge conflicts "
1360 1361 "(see hg help resolve)"))
1361 1362
1362 1363 if editor:
1363 1364 cctx._text = editor(self, cctx, subs)
1364 1365 edited = (text != cctx._text)
1365 1366
1366 1367 # Save commit message in case this transaction gets rolled back
1367 1368 # (e.g. by a pretxncommit hook). Leave the content alone on
1368 1369 # the assumption that the user will use the same editor again.
1369 1370 msgfn = self.savecommitmessage(cctx._text)
1370 1371
1371 1372 # commit subs and write new state
1372 1373 if subs:
1373 1374 for s in sorted(commitsubs):
1374 1375 sub = wctx.sub(s)
1375 1376 self.ui.status(_('committing subrepository %s\n') %
1376 1377 subrepo.subrelpath(sub))
1377 1378 sr = sub.commit(cctx._text, user, date)
1378 1379 newstate[s] = (newstate[s][0], sr)
1379 1380 subrepo.writestate(self, newstate)
1380 1381
1381 1382 p1, p2 = self.dirstate.parents()
1382 1383 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1383 1384 try:
1384 1385 self.hook("precommit", throw=True, parent1=hookp1,
1385 1386 parent2=hookp2)
1386 1387 ret = self.commitctx(cctx, True)
1387 1388 except: # re-raises
1388 1389 if edited:
1389 1390 self.ui.write(
1390 1391 _('note: commit message saved in %s\n') % msgfn)
1391 1392 raise
1392 1393
1393 1394 # update bookmarks, dirstate and mergestate
1394 1395 bookmarks.update(self, [p1, p2], ret)
1395 1396 cctx.markcommitted(ret)
1396 1397 ms.reset()
1397 1398 finally:
1398 1399 wlock.release()
1399 1400
1400 1401 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1401 1402 # hack for commands that use a temporary commit (e.g. histedit):
1402 1403 # the temporary commit may already have been stripped before the hook runs
1403 1404 if node in self:
1404 1405 self.hook("commit", node=node, parent1=parent1,
1405 1406 parent2=parent2)
1406 1407 self._afterlock(commithook)
1407 1408 return ret
1408 1409
1409 1410 @unfilteredmethod
1410 1411 def commitctx(self, ctx, error=False):
1411 1412 """Add a new revision to current repository.
1412 1413 Revision information is passed via the context argument.
1413 1414 """
1414 1415
1415 1416 tr = None
1416 1417 p1, p2 = ctx.p1(), ctx.p2()
1417 1418 user = ctx.user()
1418 1419
1419 1420 lock = self.lock()
1420 1421 try:
1421 1422 tr = self.transaction("commit")
1422 1423 trp = weakref.proxy(tr)
1423 1424
1424 1425 if ctx.files():
1425 1426 m1 = p1.manifest()
1426 1427 m2 = p2.manifest()
1427 1428 m = m1.copy()
1428 1429
1429 1430 # check in files
1430 1431 added = []
1431 1432 changed = []
1432 1433 removed = list(ctx.removed())
1433 1434 linkrev = len(self)
1434 1435 for f in sorted(ctx.modified() + ctx.added()):
1435 1436 self.ui.note(f + "\n")
1436 1437 try:
1437 1438 fctx = ctx[f]
1438 1439 if fctx is None:
1439 1440 removed.append(f)
1440 1441 else:
1441 1442 added.append(f)
1442 1443 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1443 1444 trp, changed)
1444 1445 m.setflag(f, fctx.flags())
1445 1446 except OSError, inst:
1446 1447 self.ui.warn(_("trouble committing %s!\n") % f)
1447 1448 raise
1448 1449 except IOError, inst:
1449 1450 errcode = getattr(inst, 'errno', errno.ENOENT)
1450 1451 if error or errcode and errcode != errno.ENOENT:
1451 1452 self.ui.warn(_("trouble committing %s!\n") % f)
1452 1453 raise
1453 1454
1454 1455 # update manifest
1455 1456 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1456 1457 drop = [f for f in removed if f in m]
1457 1458 for f in drop:
1458 1459 del m[f]
1459 1460 mn = self.manifest.add(m, trp, linkrev,
1460 1461 p1.manifestnode(), p2.manifestnode(),
1461 1462 added, drop)
1462 1463 files = changed + removed
1463 1464 else:
1464 1465 mn = p1.manifestnode()
1465 1466 files = []
1466 1467
1467 1468 # update changelog
1468 1469 self.changelog.delayupdate(tr)
1469 1470 n = self.changelog.add(mn, files, ctx.description(),
1470 1471 trp, p1.node(), p2.node(),
1471 1472 user, ctx.date(), ctx.extra().copy())
1472 1473 p = lambda: tr.writepending() and self.root or ""
1473 1474 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1474 1475 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1475 1476 parent2=xp2, pending=p)
1476 1477 # set the new commit in its proper phase
1477 1478 targetphase = subrepo.newcommitphase(self.ui, ctx)
1478 1479 if targetphase:
1479 1480 # retracting the boundary does not alter parent changesets.
1480 1481 # if a parent has a higher phase, the resulting phase will
1481 1482 # be compliant anyway
1482 1483 #
1483 1484 # if the minimal phase was 0 we don't need to retract anything
1484 1485 phases.retractboundary(self, tr, targetphase, [n])
1485 1486 tr.close()
1486 1487 branchmap.updatecache(self.filtered('served'))
1487 1488 return n
1488 1489 finally:
1489 1490 if tr:
1490 1491 tr.release()
1491 1492 lock.release()
1492 1493
1493 1494 @unfilteredmethod
1494 1495 def destroying(self):
1495 1496 '''Inform the repository that nodes are about to be destroyed.
1496 1497 Intended for use by strip and rollback, so there's a common
1497 1498 place for anything that has to be done before destroying history.
1498 1499
1499 1500 This is mostly useful for saving state that is in memory and waiting
1500 1501 to be flushed when the current lock is released. Because a call to
1501 1502 destroyed is imminent, the repo will be invalidated causing those
1502 1503 changes to stay in memory (waiting for the next unlock), or vanish
1503 1504 completely.
1504 1505 '''
1505 1506 # When using the same lock to commit and strip, the phasecache is left
1506 1507 # dirty after committing. Then when we strip, the repo is invalidated,
1507 1508 # causing those changes to disappear.
1508 1509 if '_phasecache' in vars(self):
1509 1510 self._phasecache.write()
1510 1511
1511 1512 @unfilteredmethod
1512 1513 def destroyed(self):
1513 1514 '''Inform the repository that nodes have been destroyed.
1514 1515 Intended for use by strip and rollback, so there's a common
1515 1516 place for anything that has to be done after destroying history.
1516 1517 '''
1517 1518 # When one tries to:
1518 1519 # 1) destroy nodes thus calling this method (e.g. strip)
1519 1520 # 2) use phasecache somewhere (e.g. commit)
1520 1521 #
1521 1522 # then 2) will fail because the phasecache contains nodes that were
1522 1523 # removed. We can either remove phasecache from the filecache,
1523 1524 # causing it to reload next time it is accessed, or simply filter
1524 1525 # the removed nodes now and write the updated cache.
1525 1526 self._phasecache.filterunknown(self)
1526 1527 self._phasecache.write()
1527 1528
1528 1529 # update the 'served' branch cache to help read-only server processes.
1529 1530 # Thanks to branchcache collaboration this is done from the nearest
1530 1531 # filtered subset and it is expected to be fast.
1531 1532 branchmap.updatecache(self.filtered('served'))
1532 1533
1533 1534 # Ensure the persistent tag cache is updated. Doing it now
1534 1535 # means that the tag cache only has to worry about destroyed
1535 1536 # heads immediately after a strip/rollback. That in turn
1536 1537 # guarantees that "cachetip == currenttip" (comparing both rev
1537 1538 # and node) always means no nodes have been added or destroyed.
1538 1539
1539 1540 # XXX this is suboptimal when qrefresh'ing: we strip the current
1540 1541 # head, refresh the tag cache, then immediately add a new head.
1541 1542 # But I think doing it this way is necessary for the "instant
1542 1543 # tag cache retrieval" case to work.
1543 1544 self.invalidate()
1544 1545
1545 1546 def walk(self, match, node=None):
1546 1547 '''
1547 1548 walk recursively through the directory tree or a given
1548 1549 changeset, finding all files matched by the match
1549 1550 function
1550 1551 '''
1551 1552 return self[node].walk(match)
1552 1553
1553 1554 def status(self, node1='.', node2=None, match=None,
1554 1555 ignored=False, clean=False, unknown=False,
1555 1556 listsubrepos=False):
1556 1557 '''a convenience method that calls node1.status(node2)'''
1557 1558 return self[node1].status(node2, match, ignored, clean, unknown,
1558 1559 listsubrepos)
1559 1560
1560 1561 def heads(self, start=None):
1561 1562 heads = self.changelog.heads(start)
1562 1563 # sort the output in rev descending order
1563 1564 return sorted(heads, key=self.changelog.rev, reverse=True)
1564 1565
1565 1566 def branchheads(self, branch=None, start=None, closed=False):
1566 1567 '''return a (possibly filtered) list of heads for the given branch
1567 1568
1568 1569 Heads are returned in topological order, from newest to oldest.
1569 1570 If branch is None, use the dirstate branch.
1570 1571 If start is not None, return only heads reachable from start.
1571 1572 If closed is True, return heads that are marked as closed as well.
1572 1573 '''
1573 1574 if branch is None:
1574 1575 branch = self[None].branch()
1575 1576 branches = self.branchmap()
1576 1577 if branch not in branches:
1577 1578 return []
1578 1579 # the cache returns heads ordered lowest to highest
1579 1580 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1580 1581 if start is not None:
1581 1582 # filter out the heads that cannot be reached from startrev
1582 1583 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1583 1584 bheads = [h for h in bheads if h in fbheads]
1584 1585 return bheads
1585 1586
1586 1587 def branches(self, nodes):
1587 1588 if not nodes:
1588 1589 nodes = [self.changelog.tip()]
1589 1590 b = []
1590 1591 for n in nodes:
1591 1592 t = n
1592 1593 while True:
1593 1594 p = self.changelog.parents(n)
1594 1595 if p[1] != nullid or p[0] == nullid:
1595 1596 b.append((t, n, p[0], p[1]))
1596 1597 break
1597 1598 n = p[0]
1598 1599 return b
1599 1600
1600 1601 def between(self, pairs):
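# Sketch of the algorithm below: for each (top, bottom) pair, walk
# first parents from top towards bottom and collect the nodes at
# exponentially growing distances 1, 2, 4, 8, ... from top (the
# sampling used by the legacy "between" wire-protocol command).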
1601 1602 r = []
1602 1603
1603 1604 for top, bottom in pairs:
1604 1605 n, l, i = top, [], 0
1605 1606 f = 1
1606 1607
1607 1608 while n != bottom and n != nullid:
1608 1609 p = self.changelog.parents(n)[0]
1609 1610 if i == f:
1610 1611 l.append(n)
1611 1612 f = f * 2
1612 1613 n = p
1613 1614 i += 1
1614 1615
1615 1616 r.append(l)
1616 1617
1617 1618 return r
1618 1619
1619 1620 def checkpush(self, pushop):
1620 1621 """Extensions can override this function if additional checks have
1621 1622 to be performed before pushing, or call it if they override the
1622 1623 push command.
1623 1624 """
1624 1625 pass
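# A hypothetical override sketch (illustration only; 'policyrepo' is
# not a real class, and relying on pushop.force is an assumption about
# the push operation object): an extension could veto forced pushes:
#   class policyrepo(localrepository):
#       def checkpush(self, pushop):
#           super(policyrepo, self).checkpush(pushop)
#           if pushop.force:
#               raise util.Abort(_('forced push is disabled here'))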
1625 1626
1626 1627 @unfilteredpropertycache
1627 1628 def prepushoutgoinghooks(self):
1628 1629 """Return util.hooks consists of "(repo, remote, outgoing)"
1629 1630 functions, which are called before pushing changesets.
1630 1631 """
1631 1632 return util.hooks()
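# A hypothetical registration sketch ('myext' and '_checkoutgoing' are
# illustrative names): an extension adds a callback with util.hooks.add,
# and each callback then runs with (repo, remote, outgoing) before the
# changesets are pushed:
#   def _checkoutgoing(repo, remote, outgoing):
#       if len(outgoing.missing) > 100:
#           raise util.Abort(_('refusing to push more than 100 csets'))
#   repo.prepushoutgoinghooks.add('myext', _checkoutgoing)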
1632 1633
1633 1634 def stream_in(self, remote, requirements):
1634 1635 lock = self.lock()
1635 1636 try:
1636 1637 # Save remote branchmap. We will use it later
1637 1638 # to speed up branchcache creation
1638 1639 rbranchmap = None
1639 1640 if remote.capable("branchmap"):
1640 1641 rbranchmap = remote.branchmap()
1641 1642
1642 1643 fp = remote.stream_out()
1643 1644 l = fp.readline()
1644 1645 try:
1645 1646 resp = int(l)
1646 1647 except ValueError:
1647 1648 raise error.ResponseError(
1648 1649 _('unexpected response from remote server:'), l)
1649 1650 if resp == 1:
1650 1651 raise util.Abort(_('operation forbidden by server'))
1651 1652 elif resp == 2:
1652 1653 raise util.Abort(_('locking the remote repository failed'))
1653 1654 elif resp != 0:
1654 1655 raise util.Abort(_('the server sent an unknown error code'))
1655 1656 self.ui.status(_('streaming all changes\n'))
1656 1657 l = fp.readline()
1657 1658 try:
1658 1659 total_files, total_bytes = map(int, l.split(' ', 1))
1659 1660 except (ValueError, TypeError):
1660 1661 raise error.ResponseError(
1661 1662 _('unexpected response from remote server:'), l)
1662 1663 self.ui.status(_('%d files to transfer, %s of data\n') %
1663 1664 (total_files, util.bytecount(total_bytes)))
1664 1665 handled_bytes = 0
1665 1666 self.ui.progress(_('clone'), 0, total=total_bytes)
1666 1667 start = time.time()
1667 1668
1668 1669 tr = self.transaction(_('clone'))
1669 1670 try:
1670 1671 for i in xrange(total_files):
1671 1672 # XXX doesn't support '\n' or '\r' in filenames
1672 1673 l = fp.readline()
1673 1674 try:
1674 1675 name, size = l.split('\0', 1)
1675 1676 size = int(size)
1676 1677 except (ValueError, TypeError):
1677 1678 raise error.ResponseError(
1678 1679 _('unexpected response from remote server:'), l)
1679 1680 if self.ui.debugflag:
1680 1681 self.ui.debug('adding %s (%s)\n' %
1681 1682 (name, util.bytecount(size)))
1682 1683 # for backwards compat, name was partially encoded
1683 1684 ofp = self.sopener(store.decodedir(name), 'w')
1684 1685 for chunk in util.filechunkiter(fp, limit=size):
1685 1686 handled_bytes += len(chunk)
1686 1687 self.ui.progress(_('clone'), handled_bytes,
1687 1688 total=total_bytes)
1688 1689 ofp.write(chunk)
1689 1690 ofp.close()
1690 1691 tr.close()
1691 1692 finally:
1692 1693 tr.release()
1693 1694
1694 1695 # Writing straight to the files circumvents the in-memory caches
1695 1696 self.invalidate()
1696 1697
1697 1698 elapsed = time.time() - start
1698 1699 if elapsed <= 0:
1699 1700 elapsed = 0.001
1700 1701 self.ui.progress(_('clone'), None)
1701 1702 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1702 1703 (util.bytecount(total_bytes), elapsed,
1703 1704 util.bytecount(total_bytes / elapsed)))
1704 1705
1705 1706 # new requirements = old non-format requirements +
1706 1707 # new format-related requirements from the
1707 1708 # streamed-in repository
1708 1709 requirements.update(set(self.requirements) - self.supportedformats)
1709 1710 self._applyrequirements(requirements)
1710 1711 self._writerequirements()
1711 1712
1712 1713 if rbranchmap:
1713 1714 rbheads = []
1714 1715 closed = []
1715 1716 for bheads in rbranchmap.itervalues():
1716 1717 rbheads.extend(bheads)
1717 1718 for h in bheads:
1718 1719 r = self.changelog.rev(h)
1719 1720 b, c = self.changelog.branchinfo(r)
1720 1721 if c:
1721 1722 closed.append(h)
1722 1723
1723 1724 if rbheads:
1724 1725 rtiprev = max((int(self.changelog.rev(node))
1725 1726 for node in rbheads))
1726 1727 cache = branchmap.branchcache(rbranchmap,
1727 1728 self[rtiprev].node(),
1728 1729 rtiprev,
1729 1730 closednodes=closed)
1730 1731 # Try to stick it as low as possible
1731 1732 # filters above 'served' are unlikely to be fetched from a clone
1732 1733 for candidate in ('base', 'immutable', 'served'):
1733 1734 rview = self.filtered(candidate)
1734 1735 if cache.validfor(rview):
1735 1736 self._branchcaches[candidate] = cache
1736 1737 cache.write(rview)
1737 1738 break
1738 1739 self.invalidate()
1739 1740 return len(self.heads()) + 1
1740 1741 finally:
1741 1742 lock.release()
1742 1743
1743 1744 def clone(self, remote, heads=[], stream=None):
1744 1745 '''clone remote repository.
1745 1746
1746 1747 keyword arguments:
1747 1748 heads: list of revs to clone (forces use of pull)
1748 1749 stream: use streaming clone if possible'''
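# A hypothetical call sketch: with stream=None the code below consults
# the server's 'stream-preferred' capability, so callers can usually
# leave both keywords at their defaults:
#   repo.clone(remote)                  # negotiate stream vs. pull
#   repo.clone(remote, stream=True)     # request a streaming clone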
1749 1750
1750 1751 # now, all clients that can request uncompressed clones can
1751 1752 # read repo formats supported by all servers that can serve
1752 1753 # them.
1753 1754
1754 1755 # if revlog format changes, client will have to check version
1755 1756 # and format flags on "stream" capability, and use
1756 1757 # uncompressed only if compatible.
1757 1758
1758 1759 if stream is None:
1759 1760 # if the server explicitly prefers to stream (for fast LANs)
1760 1761 stream = remote.capable('stream-preferred')
1761 1762
1762 1763 if stream and not heads:
1763 1764 # 'stream' means remote revlog format is revlogv1 only
1764 1765 if remote.capable('stream'):
1765 1766 self.stream_in(remote, set(('revlogv1',)))
1766 1767 else:
1767 1768 # otherwise, 'streamreqs' contains the remote revlog format
1768 1769 streamreqs = remote.capable('streamreqs')
1769 1770 if streamreqs:
1770 1771 streamreqs = set(streamreqs.split(','))
1771 1772 # if we support it, stream in and adjust our requirements
1772 1773 if not streamreqs - self.supportedformats:
1773 1774 self.stream_in(remote, streamreqs)
1774 1775
1775 1776 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1776 1777 try:
1777 1778 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1778 1779 ret = exchange.pull(self, remote, heads).cgresult
1779 1780 finally:
1780 1781 self.ui.restoreconfig(quiet)
1781 1782 return ret
1782 1783
1783 1784 def pushkey(self, namespace, key, old, new):
1784 1785 try:
1785 1786 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1786 1787 old=old, new=new)
1787 1788 except error.HookAbort, exc:
1788 1789 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1789 1790 if exc.hint:
1790 1791 self.ui.write_err(_("(%s)\n") % exc.hint)
1791 1792 return False
1792 1793 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1793 1794 ret = pushkey.push(self, namespace, key, old, new)
1794 1795 def runhook():
1795 1796 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1796 1797 ret=ret)
1797 1798 self._afterlock(runhook)
1798 1799 return ret
1799 1800
1800 1801 def listkeys(self, namespace):
1801 1802 self.hook('prelistkeys', throw=True, namespace=namespace)
1802 1803 self.ui.debug('listing keys for "%s"\n' % namespace)
1803 1804 values = pushkey.list(self, namespace)
1804 1805 self.hook('listkeys', namespace=namespace, values=values)
1805 1806 return values
1806 1807
1807 1808 def debugwireargs(self, one, two, three=None, four=None, five=None):
1808 1809 '''used to test argument passing over the wire'''
1809 1810 return "%s %s %s %s %s" % (one, two, three, four, five)
1810 1811
1811 1812 def savecommitmessage(self, text):
1812 1813 fp = self.opener('last-message.txt', 'wb')
1813 1814 try:
1814 1815 fp.write(text)
1815 1816 finally:
1816 1817 fp.close()
1817 1818 return self.pathto(fp.name[len(self.root) + 1:])
1818 1819
1819 1820 # used to avoid circular references so destructors work
1820 1821 def aftertrans(files):
1821 1822 renamefiles = [tuple(t) for t in files]
1822 1823 def a():
1823 1824 for vfs, src, dest in renamefiles:
1824 1825 try:
1825 1826 vfs.rename(src, dest)
1826 1827 except OSError: # journal file does not yet exist
1827 1828 pass
1828 1829 return a
1829 1830
1830 1831 def undoname(fn):
1831 1832 base, name = os.path.split(fn)
1832 1833 assert name.startswith('journal')
1833 1834 return os.path.join(base, name.replace('journal', 'undo', 1))
1834 1835
1835 1836 def instance(ui, path, create):
1836 1837 return localrepository(ui, util.urllocalpath(path), create)
1837 1838
1838 1839 def islocal(path):
1839 1840 return True