##// END OF EJS Templates
localrepo: use "vfs.islink()" instead of "os.path.islink()"
FUJIWARA Katsunori -
r18949:138978f2 default
parent child Browse files
Show More
@@ -1,2594 +1,2594 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
# short local aliases for the cache decorators used throughout this module
propertycache = util.propertycache
filecache = scmutil.filecache
21 21
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        # always read/write the cached value on the unfiltered repo
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # files live under .hg/store rather than .hg
        return obj.sjoin(fname)
37 37
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        # compute and store the value on the unfiltered repo object
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store on the proxy object itself, not the unfiltered repo
        object.__setattr__(obj, self.name, value)
49 49
50 50
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
54 54
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def inner(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered view
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
60 60
# capabilities a modern local peer advertises
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
# legacy peers additionally support changegroupsubset
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        # serve the 'served' filtered view, like a remote server would
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer exposes its underlying repository
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (adds changegroupsubset)
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        # legacy discovery primitive
        return self._repo.branches(nodes)

    def between(self, pairs):
        # legacy discovery primitive
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150 filtername = None
151 151
    def _baserequirements(self, create):
        """Return a fresh copy of the base requirement list for a new repo."""
        return self.requirements[:]
154 154
    def __init__(self, baseui, path=None, create=False):
        """Open the repository at path, or initialize one when create=True.

        Raises error.RepoError when the repository is missing (and
        create is False) or already exists (and create is True).
        """
        # working-directory vfs: expands env vars/user, resolves symlinks
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no per-repo hgrc; proceed with the base configuration
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                # fncache/dotencode only make sense inside a store repo
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # very old repos have no requires file
                requirements = set()

        # honor a share (.hg/sharedpath) if present
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
253 253
    def close(self):
        """Release peer resources; a no-op for local repositories."""
        pass
256 256
    def _restrictcapabilities(self, caps):
        """Hook for subclasses/extensions to trim advertised capabilities;
        the base implementation returns caps unchanged."""
        return caps
259 259
    def _applyrequirements(self, requirements):
        """Record the requirement set and derive store opener options
        (only requirements listed in openerreqs are passed through)."""
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
264 264
265 265 def _writerequirements(self):
266 266 reqfile = self.opener("requires", "w")
267 267 for r in sorted(self.requirements):
268 268 reqfile.write("%s\n" % r)
269 269 reqfile.close()
270 270
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is itself a registered subrepo: legal
                    return True
                else:
                    # path is inside a subrepo; delegate the decision
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter ancestor prefix
                parts.pop()
        return False
308 308
    def peer(self):
        """Return a localpeer view of this repository."""
        return localpeer(self) # not cached to avoid reference cycle
311 311
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
317 317
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
325 325
    @repofilecache('bookmarks')
    def _bookmarks(self):
        """Bookmark store; invalidated when .hg/bookmarks changes on disk."""
        return bookmarks.bmstore(self)
329 329
    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        """Name of the active bookmark, or None; tracks bookmarks.current."""
        return bookmarks.readcurrent(self)
333 333
334 334 def bookmarkheads(self, bookmark):
335 335 name = bookmark.split('@', 1)[0]
336 336 heads = []
337 337 for mark, n in self._bookmarks.iteritems():
338 338 if mark.split('@', 1)[0] == name:
339 339 heads.append(n)
340 340 return heads
341 341
    @storecache('phaseroots')
    def _phasecache(self):
        """Phase information cache; invalidated when store phaseroots moves."""
        return phases.phasecache(self, self._phasedefaults)
345 345
    @storecache('obsstore')
    def obsstore(self):
        """Obsolescence marker store, read from the repository store."""
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store
354 354
    @storecache('00changelog.i')
    def changelog(self):
        """The changelog, with pending (uncommitted transaction) data loaded
        when HG_PENDING points at this repository."""
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
363 363
    @storecache('00manifest.i')
    def manifest(self):
        """The manifest revlog for this repository."""
        return manifest.manifest(self.sopener)
367 367
    @repofilecache('dirstate')
    def dirstate(self):
        """The dirstate, with parent nodes validated against the changelog
        (an unknown parent is replaced by nullid, warning only once)."""
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
383 383
    def __getitem__(self, changeid):
        """repo[None] -> working directory context; otherwise a changectx."""
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)
388 388
    def __contains__(self, changeid):
        """True if changeid resolves to a changeset in this repository."""
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False
394 394
    def __nonzero__(self):
        # a repo object is always truthy, even when empty
        return True
397 397
    def __len__(self):
        """Number of revisions in the (possibly filtered) changelog."""
        return len(self.changelog)
400 400
    def __iter__(self):
        """Iterate over revision numbers."""
        return iter(self.changelog)
403 403
404 404 def revs(self, expr, *args):
405 405 '''Return a list of revisions matching the given revset'''
406 406 expr = revset.formatspec(expr, *args)
407 407 m = revset.match(None, expr)
408 408 return [r for r in m(self, list(self))]
409 409
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
417 417
    def url(self):
        """URL of this repository (always a local file: URL)."""
        return 'file:' + self.root
420 420
    def hook(self, name, throw=False, **args):
        """Run the named hook; with throw=True a failing hook aborts."""
        return hook.hook(self.ui, self, name, throw, **args)
423 423
424 424 @unfilteredmethod
425 425 def _tag(self, names, node, message, local, user, date, extra={}):
426 426 if isinstance(names, str):
427 427 names = (names,)
428 428
429 429 branches = self.branchmap()
430 430 for name in names:
431 431 self.hook('pretag', throw=True, node=hex(node), tag=name,
432 432 local=local)
433 433 if name in branches:
434 434 self.ui.warn(_("warning: tag %s conflicts with existing"
435 435 " branch name\n") % name)
436 436
437 437 def writetags(fp, names, munge, prevtags):
438 438 fp.seek(0, 2)
439 439 if prevtags and prevtags[-1] != '\n':
440 440 fp.write('\n')
441 441 for name in names:
442 442 m = munge and munge(name) or name
443 443 if (self._tagscache.tagtypes and
444 444 name in self._tagscache.tagtypes):
445 445 old = self.tags().get(name, nullid)
446 446 fp.write('%s %s\n' % (hex(old), m))
447 447 fp.write('%s %s\n' % (hex(node), m))
448 448 fp.close()
449 449
450 450 prevtags = ''
451 451 if local:
452 452 try:
453 453 fp = self.opener('localtags', 'r+')
454 454 except IOError:
455 455 fp = self.opener('localtags', 'a')
456 456 else:
457 457 prevtags = fp.read()
458 458
459 459 # local tags are stored in the current charset
460 460 writetags(fp, names, None, prevtags)
461 461 for name in names:
462 462 self.hook('tag', node=hex(node), tag=name, local=local)
463 463 return
464 464
465 465 try:
466 466 fp = self.wfile('.hgtags', 'rb+')
467 467 except IOError, e:
468 468 if e.errno != errno.ENOENT:
469 469 raise
470 470 fp = self.wfile('.hgtags', 'ab')
471 471 else:
472 472 prevtags = fp.read()
473 473
474 474 # committed tags are stored in UTF-8
475 475 writetags(fp, names, encoding.fromlocal, prevtags)
476 476
477 477 fp.close()
478 478
479 479 self.invalidatecaches()
480 480
481 481 if '.hgtags' not in self.dirstate:
482 482 self[None].add(['.hgtags'])
483 483
484 484 m = matchmod.exact(self.root, '', ['.hgtags'])
485 485 tagnode = self.commit(message, user, date, extra=extra, match=m)
486 486
487 487 for name in names:
488 488 self.hook('tag', node=hex(node), tag=name, local=local)
489 489
490 490 return tagnode
491 491
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to tag when .hgtags has uncommitted changes in any
            # of the status categories (modified/added/removed/deleted/unknown)
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
521 521
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by tagslist()/nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
544 544
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # a filtered view: recompute rather than trust the cache built
            # from the unfiltered repo
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
560 560
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
593 593
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
604 604
605 605 def tagslist(self):
606 606 '''return a list of tags ordered by revision'''
607 607 if not self._tagscache.tagslist:
608 608 l = []
609 609 for t, n in self.tags().iteritems():
610 610 r = self.changelog.rev(n)
611 611 l.append((r, t, n))
612 612 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
613 613
614 614 return self._tagscache.tagslist
615 615
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # build the node -> [tags] reverse mapping lazily, once
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
626 626
627 627 def nodebookmarks(self, node):
628 628 marks = []
629 629 for bookmark, n in self._bookmarks.iteritems():
630 630 if n == node:
631 631 marks.append(bookmark)
632 632 return sorted(marks)
633 633
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # refresh the per-filter cache before handing it out
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
638 638
639 639
640 640 def _branchtip(self, heads):
641 641 '''return the tipmost branch head in heads'''
642 642 tip = heads[-1]
643 643 for h in reversed(heads):
644 644 if not self[h].closesbranch():
645 645 tip = h
646 646 break
647 647 return tip
648 648
    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])
654 654
655 655 def branchtags(self):
656 656 '''return a dict where branch names map to the tipmost head of
657 657 the branch, open heads come before closed'''
658 658 bt = {}
659 659 for bn, heads in self.branchmap().iteritems():
660 660 bt[bn] = self._branchtip(heads)
661 661 return bt
662 662
    def lookup(self, key):
        """Resolve key (rev number, node, tag, bookmark, ...) to a node."""
        return self[key].node()
665 665
666 666 def lookupbranch(self, key, remote=None):
667 667 repo = remote or self
668 668 if key in repo.branchmap():
669 669 return key
670 670
671 671 repo = (remote and remote.local()) and remote or self
672 672 return repo[key].branch()
673 673
674 674 def known(self, nodes):
675 675 nm = self.changelog.nodemap
676 676 pc = self._phasecache
677 677 result = []
678 678 for n in nodes:
679 679 r = nm.get(n)
680 680 resp = not (r is None or pc.phase(self, r) >= phases.secret)
681 681 result.append(resp)
682 682 return result
683 683
    def local(self):
        """A local repository exposes itself as its own local object."""
        return self
686 686
    def cancopy(self):
        """True if the repository can be copied by direct file copy."""
        return self.local() # so statichttprepo's override of local() works
689 689
    def join(self, f):
        """Join f with the .hg directory path."""
        return os.path.join(self.path, f)
692 692
    def wjoin(self, f):
        """Join f with the working directory root."""
        return os.path.join(self.root, f)
695 695
696 696 def file(self, f):
697 697 if f[0] == '/':
698 698 f = f[1:]
699 699 return filelog.filelog(self.sopener, f)
700 700
    def changectx(self, changeid):
        """Return the changectx for changeid (alias for repo[changeid])."""
        return self[changeid]
703 703
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
707 707
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, fixing up copy records that
        the dirstate cannot adjust on its own."""
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
722 722
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
727 727
    def getcwd(self):
        """Current working directory, relative to the repository root."""
        return self.dirstate.getcwd()
730 730
    def pathto(self, f, cwd=None):
        """Return f rendered relative to cwd (for display to the user)."""
        return self.dirstate.pathto(f, cwd)
733 733
    def wfile(self, f, mode='r'):
        """Open working-directory file f with the given mode."""
        return self.wopener(f, mode)
736 736
737 737 def _link(self, f):
738 return os.path.islink(self.wjoin(f))
738 return self.wvfs.islink(f)
739 739
740 740 def _loadfilter(self, filter):
741 741 if filter not in self.filterpats:
742 742 l = []
743 743 for pat, cmd in self.ui.configitems(filter):
744 744 if cmd == '!':
745 745 continue
746 746 mf = matchmod.match(self.root, '', [pat])
747 747 fn = None
748 748 params = cmd
749 749 for name, filterfn in self._datafilters.iteritems():
750 750 if cmd.startswith(name):
751 751 fn = filterfn
752 752 params = cmd[len(name):].lstrip()
753 753 break
754 754 if not fn:
755 755 fn = lambda s, c, **kwargs: util.filter(s, c)
756 756 # Wrap old filters not supporting keyword arguments
757 757 if not inspect.getargspec(fn)[2]:
758 758 oldfn = fn
759 759 fn = lambda s, c, **kwargs: oldfn(s, c)
760 760 l.append((mf, fn, params))
761 761 self.filterpats[filter] = l
762 762 return self.filterpats[filter]
763 763
    def _filter(self, filterpats, filename, data):
        """Run data through the first filter whose pattern matches filename
        and return the (possibly transformed) data."""
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
772 772
    @unfilteredpropertycache
    def _encodefilterpats(self):
        """Cached [encode] filter patterns (applied by wread)."""
        return self._loadfilter('encode')
776 776
    @unfilteredpropertycache
    def _decodefilterpats(self):
        """Cached [decode] filter patterns (applied by wwrite)."""
        return self._loadfilter('decode')
780 780
    def adddatafilter(self, name, filter):
        """Register a named data filter usable from encode/decode config."""
        self._datafilters[name] = filter
783 783
    def wread(self, filename):
        """Read a working-directory file (following symlinks by returning
        their target) and apply the configured encode filters."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)
790 790
    def wwrite(self, filename, data, flags):
        """Write data to a working-directory file after decode filtering;
        flags 'l' creates a symlink, 'x' sets the executable bit."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)
799 799
    def wwritedata(self, filename, data):
        """Apply decode filters to data without writing it anywhere."""
        return self._filter(self._decodefilterpats, filename, data)
802 802
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by desc.

        Raises error.RepoError when an abandoned journal is found."""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # a transaction is already running; nest inside it
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # keep only a weak reference to avoid a cycle repo <-> transaction
        self._transref = weakref.ref(tr)
        return tr
822 822
    def _journalfiles(self):
        """Paths of every journal file written for a transaction."""
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
828 828
    def undofiles(self):
        """Paths of the undo.* counterparts of the journal files."""
        return [undoname(x) for x in self._journalfiles()]
831 831
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, bookmarks and phaseroots into the
        journal.* files so a rollback can restore them."""
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
843 843
    def recover(self):
        """Roll back an interrupted transaction; return True if one was
        found and rolled back, False otherwise."""
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
858 858
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction if undo data exists; returns 0 on
        success (delegated to _rollback), 1 when there is nothing to do."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
871 871
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Perform the actual rollback from the undo.* files.

        Returns 0; with dryrun=True only the status message is printed.
        Without force, refuses to roll back a commit when the working
        directory is not checked out at tip (data-loss guard)."""
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc: old undo data or unknown transaction source
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # restore dirstate/branch only if a dirstate parent vanished
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
937 937
938 938 def invalidatecaches(self):
939 939
940 940 if '_tagscache' in vars(self):
941 941 # can't use delattr on proxy
942 942 del self.__dict__['_tagscache']
943 943
944 944 self.unfiltered()._branchcaches.clear()
945 945 self.invalidatevolatilesets()
946 946
    def invalidatevolatilesets(self):
        """Forget caches that depend on volatile state.

        Clears the per-filter revision caches and the obsolescence
        caches maintained by the obsolete module.
        """
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
950 950
951 951 def invalidatedirstate(self):
952 952 '''Invalidates the dirstate, causing the next call to dirstate
953 953 to check if it was modified since the last time it was read,
954 954 rereading it if it has.
955 955
956 956 This is different to dirstate.invalidate() that it doesn't always
957 957 rereads the dirstate. Use dirstate.invalidate() if you want to
958 958 explicitly read the dirstate again (i.e. restoring it to a previous
959 959 known good state).'''
960 960 if hasunfilteredcache(self, 'dirstate'):
961 961 for k in self.dirstate._filecache:
962 962 try:
963 963 delattr(self.dirstate, k)
964 964 except AttributeError:
965 965 pass
966 966 delattr(self.unfiltered(), 'dirstate')
967 967
968 968 def invalidate(self):
969 969 unfiltered = self.unfiltered() # all file caches are stored unfiltered
970 970 for k in self._filecache:
971 971 # dirstate is invalidated separately in invalidatedirstate()
972 972 if k == 'dirstate':
973 973 continue
974 974
975 975 try:
976 976 delattr(unfiltered, k)
977 977 except AttributeError:
978 978 pass
979 979 self.invalidatecaches()
980 980
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file 'lockname' and return the lock object.

        If the lock is already held and 'wait' is false, LockHeld
        propagates to the caller; otherwise we warn and retry with a
        timeout taken from the 'ui.timeout' config value (600 seconds
        by default).  'releasefn' runs when the lock is released,
        'acquirefn' (if provided) right after acquisition.
        """
        try:
            # first try a non-blocking acquisition (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
995 995
996 996 def _afterlock(self, callback):
997 997 """add a callback to the current repository lock.
998 998
999 999 The callback will be executed on lock release."""
1000 1000 l = self._lockref and self._lockref()
1001 1001 if l:
1002 1002 l.postrelease.append(callback)
1003 1003 else:
1004 1004 callback()
1005 1005
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # re-enter an already-held lock instead of acquiring a new one
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush pending store and phase data on release
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            # refresh filecache stat entries so files written while the
            # lock was held are not treated as externally modified later
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1028 1028
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # re-enter an already-held wlock instead of acquiring a new one
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # write the dirstate back and resync its filecache stat info
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1047 1047
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node to record in the new manifest for this
        file: a freshly added filelog revision when the file changed,
        otherwise the reused first-parent node.  'changelist' is
        extended with the filename whenever a new filelog revision is
        created or when only the flags changed during a merge.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1127 1127
    @unfilteredmethod
    # NOTE(review): 'extra={}' is a shared mutable default; nothing in this
    # method mutates it, but confirm callees (context.workingctx) don't either.
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # changes: (modified, added, removed, deleted, unknown,
            #           ignored, clean) as returned by status()
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            # bail out early when there is nothing to commit
            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # run the commit hook only after the wlock has been released
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
1295 1295
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the newly added changeset.  With error=True,
        IOErrors while committing individual files are fatal even when
        the errno is ENOENT; otherwise such files are treated as removed.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit is proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1377 1377
1378 1378 @unfilteredmethod
1379 1379 def destroying(self):
1380 1380 '''Inform the repository that nodes are about to be destroyed.
1381 1381 Intended for use by strip and rollback, so there's a common
1382 1382 place for anything that has to be done before destroying history.
1383 1383
1384 1384 This is mostly useful for saving state that is in memory and waiting
1385 1385 to be flushed when the current lock is released. Because a call to
1386 1386 destroyed is imminent, the repo will be invalidated causing those
1387 1387 changes to stay in memory (waiting for the next unlock), or vanish
1388 1388 completely.
1389 1389 '''
1390 1390 # When using the same lock to commit and strip, the phasecache is left
1391 1391 # dirty after committing. Then when we strip, the repo is invalidated,
1392 1392 # causing those changes to disappear.
1393 1393 if '_phasecache' in vars(self):
1394 1394 self._phasecache.write()
1395 1395
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        Rewrites the phase cache, refreshes the 'served' branch cache
        and invalidates the repository's file caches.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1429 1429
1430 1430 def walk(self, match, node=None):
1431 1431 '''
1432 1432 walk recursively through the directory tree or a given
1433 1433 changeset, finding all files matched by the match
1434 1434 function
1435 1435 '''
1436 1436 return self[node].walk(match)
1437 1437
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of seven sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest restricted to the files selected by 'match'
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    # seen: whatever is left in mf1 afterwards was removed
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1590 1590
1591 1591 def heads(self, start=None):
1592 1592 heads = self.changelog.heads(start)
1593 1593 # sort the output in rev descending order
1594 1594 return sorted(heads, key=self.changelog.rev, reverse=True)
1595 1595
1596 1596 def branchheads(self, branch=None, start=None, closed=False):
1597 1597 '''return a (possibly filtered) list of heads for the given branch
1598 1598
1599 1599 Heads are returned in topological order, from newest to oldest.
1600 1600 If branch is None, use the dirstate branch.
1601 1601 If start is not None, return only heads reachable from start.
1602 1602 If closed is True, return heads that are marked as closed as well.
1603 1603 '''
1604 1604 if branch is None:
1605 1605 branch = self[None].branch()
1606 1606 branches = self.branchmap()
1607 1607 if branch not in branches:
1608 1608 return []
1609 1609 # the cache returns heads ordered lowest to highest
1610 1610 bheads = list(reversed(branches[branch]))
1611 1611 if start is not None:
1612 1612 # filter out the heads that cannot be reached from startrev
1613 1613 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1614 1614 bheads = [h for h in bheads if h in fbheads]
1615 1615 if not closed:
1616 1616 bheads = [h for h in bheads if not self[h].closesbranch()]
1617 1617 return bheads
1618 1618
1619 1619 def branches(self, nodes):
1620 1620 if not nodes:
1621 1621 nodes = [self.changelog.tip()]
1622 1622 b = []
1623 1623 for n in nodes:
1624 1624 t = n
1625 1625 while True:
1626 1626 p = self.changelog.parents(n)
1627 1627 if p[1] != nullid or p[0] == nullid:
1628 1628 b.append((t, n, p[0], p[1]))
1629 1629 break
1630 1630 n = p[0]
1631 1631 return b
1632 1632
1633 1633 def between(self, pairs):
1634 1634 r = []
1635 1635
1636 1636 for top, bottom in pairs:
1637 1637 n, l, i = top, [], 0
1638 1638 f = 1
1639 1639
1640 1640 while n != bottom and n != nullid:
1641 1641 p = self.changelog.parents(n)[0]
1642 1642 if i == f:
1643 1643 l.append(n)
1644 1644 f = f * 2
1645 1645 n = p
1646 1646 i += 1
1647 1647
1648 1648 r.append(l)
1649 1649
1650 1650 return r
1651 1651
    def pull(self, remote, heads=None, force=False):
        '''Pull changes from 'remote' into this repository.

        With heads=None everything available is pulled; otherwise only
        what is needed to reach the given heads.  Phase data and (when
        enabled) obsolescence markers are synchronized afterwards.
        Returns the result of addchangegroup(), or 0 when no changes
        were found.
        '''
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the changegroup retrieval method the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                      "other repository doesn't support "
                                      "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # we use unfiltered changelog here because hidden revision must
                # be taken in account for phase synchronization. They may
                # becomes public and becomes visible again.
                cl = self.unfiltered().changelog
                clstart = len(cl)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(cl)
                added = [cl.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                    self.invalidatevolatilesets()
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
1737 1737
1738 1738 def checkpush(self, force, revs):
1739 1739 """Extensions can override this function if additional checks have
1740 1740 to be performed before pushing, or call it if they override push
1741 1741 command.
1742 1742 """
1743 1743 pass
1744 1744
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # discovery and phase exchange are done on the unfiltered repo
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            # lock ordering: local lock first, then the remote lock, and
            # only when the remote cannot accept an unbundle call
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if unfi.obsstore:
                            # this message are here for 80 char limit reason
                            mso = _("push includes obsolete changeset: %s!")
                            mst = "push includes %s changeset: %s!"
                            # plain versions for i18n tool to detect them
                            _("push includes unstable changeset: %s!")
                            _("push includes bumped changeset: %s!")
                            _("push includes divergent changeset: %s!")
                            # If we are to push if there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missinghead will be obsolete or
                            # unstable. So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.troubled():
                                    raise util.Abort(_(mst)
                                                     % (ctx.troubles()[0],
                                                        ctx))
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                # compute 'cheads': the heads up to which local and remote
                # are now synchronized, used below for phase exchange
                if ret:
                    # push succeed, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All out push fails. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if (self.ui.configbool('ui', '_usedassubrepo', False)
                    and remotephases    # server supports phases
                    and ret is None # nothing was pushed
                    and remotephases.get('publishing', False)):
                    # When:
                    # - this is a subrepo push
                    # - and remote support phase
                    # - and no changeset was pushed
                    # - and remote is publishing
                    # We may be in issue 3871 case!
                    # We drop the possible phase synchronisation done by
                    # courtesy to publish changesets possibly locally draft
                    # on the remote.
                    remotephases = {'publishing': 'True'}
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                            % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        # bookmark synchronisation runs after both locks are released;
        # a remote bookmark is only moved when the new location is a
        # valid destination per bookmarks.validdest
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1947 1947
1948 1948 def changegroupinfo(self, nodes, source):
1949 1949 if self.ui.verbose or source == 'bundle':
1950 1950 self.ui.status(_("%d changesets found\n") % len(nodes))
1951 1951 if self.ui.debugflag:
1952 1952 self.ui.debug("list of changesets:\n")
1953 1953 for node in nodes:
1954 1954 self.ui.debug("%s\n" % hex(node))
1955 1955
1956 1956 def changegroupsubset(self, bases, heads, source):
1957 1957 """Compute a changegroup consisting of all the nodes that are
1958 1958 descendants of any of the bases and ancestors of any of the heads.
1959 1959 Return a chunkbuffer object whose read() method will return
1960 1960 successive changegroup chunks.
1961 1961
1962 1962 It is fairly complex as determining which filenodes and which
1963 1963 manifest nodes need to be included for the changeset to be complete
1964 1964 is non-trivial.
1965 1965
1966 1966 Another wrinkle is doing the reverse, figuring out which changeset in
1967 1967 the changegroup a particular filenode or manifestnode belongs to.
1968 1968 """
1969 1969 cl = self.changelog
1970 1970 if not bases:
1971 1971 bases = [nullid]
1972 1972 csets, bases, heads = cl.nodesbetween(bases, heads)
1973 1973 # We assume that all ancestors of bases are known
1974 1974 common = cl.ancestors([cl.rev(n) for n in bases])
1975 1975 return self._changegroupsubset(common, csets, heads, source)
1976 1976
1977 1977 def getlocalbundle(self, source, outgoing):
1978 1978 """Like getbundle, but taking a discovery.outgoing as an argument.
1979 1979
1980 1980 This is only implemented for local repos and reuses potentially
1981 1981 precomputed sets in outgoing."""
1982 1982 if not outgoing.missing:
1983 1983 return None
1984 1984 return self._changegroupsubset(outgoing.common,
1985 1985 outgoing.missing,
1986 1986 outgoing.missingheads,
1987 1987 source)
1988 1988
1989 1989 def getbundle(self, source, heads=None, common=None):
1990 1990 """Like changegroupsubset, but returns the set difference between the
1991 1991 ancestors of heads and the ancestors common.
1992 1992
1993 1993 If heads is None, use the local heads. If common is None, use [nullid].
1994 1994
1995 1995 The nodes in common might not all be known locally due to the way the
1996 1996 current discovery protocol works.
1997 1997 """
1998 1998 cl = self.changelog
1999 1999 if common:
2000 2000 hasnode = cl.hasnode
2001 2001 common = [n for n in common if hasnode(n)]
2002 2002 else:
2003 2003 common = [nullid]
2004 2004 if not heads:
2005 2005 heads = cl.heads()
2006 2006 return self.getlocalbundle(source,
2007 2007 discovery.outgoing(cl, common, heads))
2008 2008
    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build the changegroup for csets, skipping what is already known.

        commonrevs: revs assumed to be already present on the receiver
                    (used to prune manifest and file nodes)
        csets: changeset nodes to bundle
        heads: heads of the requested subset; sorted in place below
        source: operation tag passed to the 'preoutgoing'/'outgoing' hooks
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # fstate and count are mutable cells shared with the nested
        # lookup/gengroup closures below
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        # callback invoked by the revlog group generators; also collects
        # the manifests (from changesets) and filenodes (from manifests)
        # needed by the later stages
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2116 2116
2117 2117 def changegroup(self, basenodes, source):
2118 2118 # to avoid a race we use changegroupsubset() (issue1320)
2119 2119 return self.changegroupsubset(basenodes, self.heads(), source)
2120 2120
    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        # fstate and count are mutable cells shared with the nested
        # lookup/gengroup closures below
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        # nodes of *log* whose linkrev points at one of the changesets
        # being sent
        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        # callback invoked by the revlog group generators; collects the
        # changed files and needed manifests as changesets are visited
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2214 2214
    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # callbacks handed to the changelog/filelog addgroup() calls below
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            # small stateful progress callback shared by the three stages
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                        else:
                            raise util.Abort(
                                _("received spurious file revlog entry"))
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # any leftover entry in needfiles is a filenode announced by a
            # manifest but never delivered by the stream
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # dh: net change in head count, not counting heads that close
            # their branch; drives the return value below
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old server can not push the boundary themself.
                # New server won't push the boundary if changeset already
                # existed locally as secrete
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but coming call to
                    # `destroyed` will repair it.
                    # In other case we can safely update cache on disk.
                    branchmap.updatecache(self.filtered('served'))
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)

                    newheads = [h for h in self.heads() if h not in oldheads]
                    self.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
2420 2420
    def stream_in(self, remote, requirements):
        """Populate this (empty) repo via an uncompressed stream clone.

        Reads the stream_out wire protocol response from *remote*: a
        status line, a "files bytes" summary line, then for each file a
        "name\\0size" header followed by the raw file data, which is
        written straight into the store.  *requirements* is the set of
        format requirements to adopt; it is extended with our existing
        non-format requirements and written out.

        Returns len(self.heads()) + 1, mirroring addchangegroup()'s
        "new heads" style of return value.
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                            for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2513 2513
2514 2514 def clone(self, remote, heads=[], stream=False):
2515 2515 '''clone remote repository.
2516 2516
2517 2517 keyword arguments:
2518 2518 heads: list of revs to clone (forces use of pull)
2519 2519 stream: use streaming clone if possible'''
2520 2520
2521 2521 # now, all clients that can request uncompressed clones can
2522 2522 # read repo formats supported by all servers that can serve
2523 2523 # them.
2524 2524
2525 2525 # if revlog format changes, client will have to check version
2526 2526 # and format flags on "stream" capability, and use
2527 2527 # uncompressed only if compatible.
2528 2528
2529 2529 if not stream:
2530 2530 # if the server explicitly prefers to stream (for fast LANs)
2531 2531 stream = remote.capable('stream-preferred')
2532 2532
2533 2533 if stream and not heads:
2534 2534 # 'stream' means remote revlog format is revlogv1 only
2535 2535 if remote.capable('stream'):
2536 2536 return self.stream_in(remote, set(('revlogv1',)))
2537 2537 # otherwise, 'streamreqs' contains the remote revlog format
2538 2538 streamreqs = remote.capable('streamreqs')
2539 2539 if streamreqs:
2540 2540 streamreqs = set(streamreqs.split(','))
2541 2541 # if we support it, stream in and adjust our requirements
2542 2542 if not streamreqs - self.supportedformats:
2543 2543 return self.stream_in(remote, streamreqs)
2544 2544 return self.pull(remote, heads)
2545 2545
2546 2546 def pushkey(self, namespace, key, old, new):
2547 2547 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2548 2548 old=old, new=new)
2549 2549 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2550 2550 ret = pushkey.push(self, namespace, key, old, new)
2551 2551 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2552 2552 ret=ret)
2553 2553 return ret
2554 2554
2555 2555 def listkeys(self, namespace):
2556 2556 self.hook('prelistkeys', throw=True, namespace=namespace)
2557 2557 self.ui.debug('listing keys for "%s"\n' % namespace)
2558 2558 values = pushkey.list(self, namespace)
2559 2559 self.hook('listkeys', namespace=namespace, values=values)
2560 2560 return values
2561 2561
2562 2562 def debugwireargs(self, one, two, three=None, four=None, five=None):
2563 2563 '''used to test argument passing over the wire'''
2564 2564 return "%s %s %s %s %s" % (one, two, three, four, five)
2565 2565
2566 2566 def savecommitmessage(self, text):
2567 2567 fp = self.opener('last-message.txt', 'wb')
2568 2568 try:
2569 2569 fp.write(text)
2570 2570 finally:
2571 2571 fp.close()
2572 2572 return self.pathto(fp.name[len(self.root) + 1:])
2573 2573
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    A not-yet-existing source file is silently skipped.  Implemented as
    a plain closure (rather than a bound method) to avoid circular
    references so destructors work.
    """
    pairs = [tuple(t) for t in files]
    def renameall():
        for src, dest in pairs:
            try:
                util.rename(src, dest)
            except OSError:
                # the journal file may not exist yet; nothing to do
                pass
    return renameall
2584 2584
def undoname(fn):
    """Map a journal file path to the corresponding undo file path.

    The basename must start with 'journal'; only that first occurrence
    is replaced with 'undo'.
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2589 2589
def instance(ui, path, create):
    """Repository factory: open (or create) the local repository at
    *path*, which may be a plain path or a local URL."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2592 2592
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,942 +1,945 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import match as matchmod
12 12 import os, errno, re, stat, glob
13 13
14 14 if os.name == 'nt':
15 15 import scmwindows as scmplatform
16 16 else:
17 17 import scmposix as scmplatform
18 18
19 19 systemrcpath = scmplatform.systemrcpath
20 20 userrcpath = scmplatform.userrcpath
21 21
22 22 def nochangesfound(ui, repo, excluded=None):
23 23 '''Report no changes for push/pull, excluded is None or a list of
24 24 nodes excluded from the push/pull.
25 25 '''
26 26 secretlist = []
27 27 if excluded:
28 28 for n in excluded:
29 29 if n not in repo:
30 30 # discovery should not have included the filtered revision,
31 31 # we have to explicitly exclude it until discovery is cleanup.
32 32 continue
33 33 ctx = repo[n]
34 34 if ctx.phase() >= phases.secret and not ctx.extinct():
35 35 secretlist.append(n)
36 36
37 37 if secretlist:
38 38 ui.status(_("no changes found (ignored %d secret changesets)\n")
39 39 % len(secretlist))
40 40 else:
41 41 ui.status(_("no changes found\n"))
42 42
43 43 def checknewlabel(repo, lbl, kind):
44 44 if lbl in ['tip', '.', 'null']:
45 45 raise util.Abort(_("the name '%s' is reserved") % lbl)
46 46 for c in (':', '\0', '\n', '\r'):
47 47 if c in lbl:
48 48 raise util.Abort(_("%r cannot be used in a name") % c)
49 49 try:
50 50 int(lbl)
51 51 raise util.Abort(_("a %s cannot have an integer as its name") % kind)
52 52 except ValueError:
53 53 pass
54 54
55 55 def checkfilename(f):
56 56 '''Check that the filename f is an acceptable filename for a tracked file'''
57 57 if '\r' in f or '\n' in f:
58 58 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
59 59
60 60 def checkportable(ui, f):
61 61 '''Check if filename f is portable and warn or abort depending on config'''
62 62 checkfilename(f)
63 63 abort, warn = checkportabilityalert(ui)
64 64 if abort or warn:
65 65 msg = util.checkwinfilename(f)
66 66 if msg:
67 67 msg = "%s: %r" % (msg, f)
68 68 if abort:
69 69 raise util.Abort(msg)
70 70 ui.warn(_("warning: %s\n") % msg)
71 71
72 72 def checkportabilityalert(ui):
73 73 '''check if the user's config requests nothing, a warning, or abort for
74 74 non-portable filenames'''
75 75 val = ui.config('ui', 'portablefilenames', 'warn')
76 76 lval = val.lower()
77 77 bval = util.parsebool(val)
78 78 abort = os.name == 'nt' or lval == 'abort'
79 79 warn = bval or lval == 'warn'
80 80 if bval is None and not (warn or abort or lval == 'ignore'):
81 81 raise error.ConfigError(
82 82 _("ui.portablefilenames value is invalid ('%s')") % val)
83 83 return abort, warn
84 84
85 85 class casecollisionauditor(object):
86 86 def __init__(self, ui, abort, dirstate):
87 87 self._ui = ui
88 88 self._abort = abort
89 89 allfiles = '\0'.join(dirstate._map)
90 90 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
91 91 self._dirstate = dirstate
92 92 # The purpose of _newfiles is so that we don't complain about
93 93 # case collisions if someone were to call this object with the
94 94 # same filename twice.
95 95 self._newfiles = set()
96 96
97 97 def __call__(self, f):
98 98 fl = encoding.lower(f)
99 99 if (fl in self._loweredfiles and f not in self._dirstate and
100 100 f not in self._newfiles):
101 101 msg = _('possible case-folding collision for %s') % f
102 102 if self._abort:
103 103 raise util.Abort(msg)
104 104 self._ui.warn(_("warning: %s\n") % msg)
105 105 self._loweredfiles.add(fl)
106 106 self._newfiles.add(f)
107 107
108 108 class pathauditor(object):
109 109 '''ensure that a filesystem path contains no banned components.
110 110 the following properties of a path are checked:
111 111
112 112 - ends with a directory separator
113 113 - under top-level .hg
114 114 - starts at the root of a windows drive
115 115 - contains ".."
116 116 - traverses a symlink (e.g. a/symlink_here/b)
117 117 - inside a nested repository (a callback can be used to approve
118 118 some nested repositories, e.g., subrepositories)
119 119 '''
120 120
121 121 def __init__(self, root, callback=None):
122 122 self.audited = set()
123 123 self.auditeddir = set()
124 124 self.root = root
125 125 self.callback = callback
126 126 if os.path.lexists(root) and not util.checkcase(root):
127 127 self.normcase = util.normcase
128 128 else:
129 129 self.normcase = lambda x: x
130 130
131 131 def __call__(self, path):
132 132 '''Check the relative path.
133 133 path may contain a pattern (e.g. foodir/**.txt)'''
134 134
135 135 path = util.localpath(path)
136 136 normpath = self.normcase(path)
137 137 if normpath in self.audited:
138 138 return
139 139 # AIX ignores "/" at end of path, others raise EISDIR.
140 140 if util.endswithsep(path):
141 141 raise util.Abort(_("path ends in directory separator: %s") % path)
142 142 parts = util.splitpath(path)
143 143 if (os.path.splitdrive(path)[0]
144 144 or parts[0].lower() in ('.hg', '.hg.', '')
145 145 or os.pardir in parts):
146 146 raise util.Abort(_("path contains illegal component: %s") % path)
147 147 if '.hg' in path.lower():
148 148 lparts = [p.lower() for p in parts]
149 149 for p in '.hg', '.hg.':
150 150 if p in lparts[1:]:
151 151 pos = lparts.index(p)
152 152 base = os.path.join(*parts[:pos])
153 153 raise util.Abort(_("path '%s' is inside nested repo %r")
154 154 % (path, base))
155 155
156 156 normparts = util.splitpath(normpath)
157 157 assert len(parts) == len(normparts)
158 158
159 159 parts.pop()
160 160 normparts.pop()
161 161 prefixes = []
162 162 while parts:
163 163 prefix = os.sep.join(parts)
164 164 normprefix = os.sep.join(normparts)
165 165 if normprefix in self.auditeddir:
166 166 break
167 167 curpath = os.path.join(self.root, prefix)
168 168 try:
169 169 st = os.lstat(curpath)
170 170 except OSError, err:
171 171 # EINVAL can be raised as invalid path syntax under win32.
172 172 # They must be ignored for patterns can be checked too.
173 173 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
174 174 raise
175 175 else:
176 176 if stat.S_ISLNK(st.st_mode):
177 177 raise util.Abort(
178 178 _('path %r traverses symbolic link %r')
179 179 % (path, prefix))
180 180 elif (stat.S_ISDIR(st.st_mode) and
181 181 os.path.isdir(os.path.join(curpath, '.hg'))):
182 182 if not self.callback or not self.callback(curpath):
183 183 raise util.Abort(_("path '%s' is inside nested "
184 184 "repo %r")
185 185 % (path, prefix))
186 186 prefixes.append(normprefix)
187 187 parts.pop()
188 188 normparts.pop()
189 189
190 190 self.audited.add(normpath)
191 191 # only add prefixes to the cache after checking everything: we don't
192 192 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
193 193 self.auditeddir.update(prefixes)
194 194
195 195 def check(self, path):
196 196 try:
197 197 self(path)
198 198 return True
199 199 except (OSError, util.Abort):
200 200 return False
201 201
202 202 class abstractvfs(object):
203 203 """Abstract base class; cannot be instantiated"""
204 204
205 205 def __init__(self, *args, **kwargs):
206 206 '''Prevent instantiation; don't call this from subclasses.'''
207 207 raise NotImplementedError('attempted instantiating ' + str(type(self)))
208 208
209 209 def tryread(self, path):
210 210 '''gracefully return an empty string for missing files'''
211 211 try:
212 212 return self.read(path)
213 213 except IOError, inst:
214 214 if inst.errno != errno.ENOENT:
215 215 raise
216 216 return ""
217 217
218 218 def read(self, path):
219 219 fp = self(path, 'rb')
220 220 try:
221 221 return fp.read()
222 222 finally:
223 223 fp.close()
224 224
225 225 def write(self, path, data):
226 226 fp = self(path, 'wb')
227 227 try:
228 228 return fp.write(data)
229 229 finally:
230 230 fp.close()
231 231
232 232 def append(self, path, data):
233 233 fp = self(path, 'ab')
234 234 try:
235 235 return fp.write(data)
236 236 finally:
237 237 fp.close()
238 238
239 239 def exists(self, path=None):
240 240 return os.path.exists(self.join(path))
241 241
242 242 def isdir(self, path=None):
243 243 return os.path.isdir(self.join(path))
244 244
245 def islink(self, path=None):
246 return os.path.islink(self.join(path))
247
245 248 def makedir(self, path=None, notindexed=True):
246 249 return util.makedir(self.join(path), notindexed)
247 250
248 251 def makedirs(self, path=None, mode=None):
249 252 return util.makedirs(self.join(path), mode)
250 253
251 254 def mkdir(self, path=None):
252 255 return os.mkdir(self.join(path))
253 256
254 257 def readdir(self, path=None, stat=None, skip=None):
255 258 return osutil.listdir(self.join(path), stat, skip)
256 259
257 260 def rename(self, src, dst):
258 261 return util.rename(self.join(src), self.join(dst))
259 262
260 263 def stat(self, path=None):
261 264 return os.stat(self.join(path))
262 265
263 266 class vfs(abstractvfs):
264 267 '''Operate files relative to a base directory
265 268
266 269 This class is used to hide the details of COW semantics and
267 270 remote file access from higher level code.
268 271 '''
269 272 def __init__(self, base, audit=True, expandpath=False, realpath=False):
270 273 if expandpath:
271 274 base = util.expandpath(base)
272 275 if realpath:
273 276 base = os.path.realpath(base)
274 277 self.base = base
275 278 self._setmustaudit(audit)
276 279 self.createmode = None
277 280 self._trustnlink = None
278 281
279 282 def _getmustaudit(self):
280 283 return self._audit
281 284
282 285 def _setmustaudit(self, onoff):
283 286 self._audit = onoff
284 287 if onoff:
285 288 self.audit = pathauditor(self.base)
286 289 else:
287 290 self.audit = util.always
288 291
289 292 mustaudit = property(_getmustaudit, _setmustaudit)
290 293
291 294 @util.propertycache
292 295 def _cansymlink(self):
293 296 return util.checklink(self.base)
294 297
295 298 @util.propertycache
296 299 def _chmod(self):
297 300 return util.checkexec(self.base)
298 301
299 302 def _fixfilemode(self, name):
300 303 if self.createmode is None or not self._chmod:
301 304 return
302 305 os.chmod(name, self.createmode & 0666)
303 306
304 307 def __call__(self, path, mode="r", text=False, atomictemp=False):
305 308 if self._audit:
306 309 r = util.checkosfilename(path)
307 310 if r:
308 311 raise util.Abort("%s: %r" % (r, path))
309 312 self.audit(path)
310 313 f = self.join(path)
311 314
312 315 if not text and "b" not in mode:
313 316 mode += "b" # for that other OS
314 317
315 318 nlink = -1
316 319 if mode not in ('r', 'rb'):
317 320 dirname, basename = util.split(f)
318 321 # If basename is empty, then the path is malformed because it points
319 322 # to a directory. Let the posixfile() call below raise IOError.
320 323 if basename:
321 324 if atomictemp:
322 325 util.ensuredirs(dirname, self.createmode)
323 326 return util.atomictempfile(f, mode, self.createmode)
324 327 try:
325 328 if 'w' in mode:
326 329 util.unlink(f)
327 330 nlink = 0
328 331 else:
329 332 # nlinks() may behave differently for files on Windows
330 333 # shares if the file is open.
331 334 fd = util.posixfile(f)
332 335 nlink = util.nlinks(f)
333 336 if nlink < 1:
334 337 nlink = 2 # force mktempcopy (issue1922)
335 338 fd.close()
336 339 except (OSError, IOError), e:
337 340 if e.errno != errno.ENOENT:
338 341 raise
339 342 nlink = 0
340 343 util.ensuredirs(dirname, self.createmode)
341 344 if nlink > 0:
342 345 if self._trustnlink is None:
343 346 self._trustnlink = nlink > 1 or util.checknlink(f)
344 347 if nlink > 1 or not self._trustnlink:
345 348 util.rename(util.mktempcopy(f), f)
346 349 fp = util.posixfile(f, mode)
347 350 if nlink == 0:
348 351 self._fixfilemode(f)
349 352 return fp
350 353
351 354 def symlink(self, src, dst):
352 355 self.audit(dst)
353 356 linkname = self.join(dst)
354 357 try:
355 358 os.unlink(linkname)
356 359 except OSError:
357 360 pass
358 361
359 362 util.ensuredirs(os.path.dirname(linkname), self.createmode)
360 363
361 364 if self._cansymlink:
362 365 try:
363 366 os.symlink(src, linkname)
364 367 except OSError, err:
365 368 raise OSError(err.errno, _('could not symlink to %r: %s') %
366 369 (src, err.strerror), linkname)
367 370 else:
368 371 self.write(dst, src)
369 372
370 373 def join(self, path):
371 374 if path:
372 375 return os.path.join(self.base, path)
373 376 else:
374 377 return self.base
375 378
376 379 opener = vfs
377 380
378 381 class auditvfs(object):
379 382 def __init__(self, vfs):
380 383 self.vfs = vfs
381 384
382 385 def _getmustaudit(self):
383 386 return self.vfs.mustaudit
384 387
385 388 def _setmustaudit(self, onoff):
386 389 self.vfs.mustaudit = onoff
387 390
388 391 mustaudit = property(_getmustaudit, _setmustaudit)
389 392
390 393 class filtervfs(abstractvfs, auditvfs):
391 394 '''Wrapper vfs for filtering filenames with a function.'''
392 395
393 396 def __init__(self, vfs, filter):
394 397 auditvfs.__init__(self, vfs)
395 398 self._filter = filter
396 399
397 400 def __call__(self, path, *args, **kwargs):
398 401 return self.vfs(self._filter(path), *args, **kwargs)
399 402
400 403 def join(self, path):
401 404 if path:
402 405 return self.vfs.join(self._filter(path))
403 406 else:
404 407 return self.vfs.join(path)
405 408
406 409 filteropener = filtervfs
407 410
408 411 class readonlyvfs(abstractvfs, auditvfs):
409 412 '''Wrapper vfs preventing any writing.'''
410 413
411 414 def __init__(self, vfs):
412 415 auditvfs.__init__(self, vfs)
413 416
414 417 def __call__(self, path, mode='r', *args, **kw):
415 418 if mode not in ('r', 'rb'):
416 419 raise util.Abort('this vfs is read only')
417 420 return self.vfs(path, mode, *args, **kw)
418 421
419 422
420 423 def canonpath(root, cwd, myname, auditor=None):
421 424 '''return the canonical path of myname, given cwd and root'''
422 425 if util.endswithsep(root):
423 426 rootsep = root
424 427 else:
425 428 rootsep = root + os.sep
426 429 name = myname
427 430 if not os.path.isabs(name):
428 431 name = os.path.join(root, cwd, name)
429 432 name = os.path.normpath(name)
430 433 if auditor is None:
431 434 auditor = pathauditor(root)
432 435 if name != rootsep and name.startswith(rootsep):
433 436 name = name[len(rootsep):]
434 437 auditor(name)
435 438 return util.pconvert(name)
436 439 elif name == root:
437 440 return ''
438 441 else:
439 442 # Determine whether `name' is in the hierarchy at or beneath `root',
440 443 # by iterating name=dirname(name) until that causes no change (can't
441 444 # check name == '/', because that doesn't work on windows). The list
442 445 # `rel' holds the reversed list of components making up the relative
443 446 # file name we want.
444 447 rel = []
445 448 while True:
446 449 try:
447 450 s = util.samefile(name, root)
448 451 except OSError:
449 452 s = False
450 453 if s:
451 454 if not rel:
452 455 # name was actually the same as root (maybe a symlink)
453 456 return ''
454 457 rel.reverse()
455 458 name = os.path.join(*rel)
456 459 auditor(name)
457 460 return util.pconvert(name)
458 461 dirname, basename = util.split(name)
459 462 rel.append(basename)
460 463 if dirname == name:
461 464 break
462 465 name = dirname
463 466
464 467 raise util.Abort(_("%s not under root '%s'") % (myname, root))
465 468
466 469 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
467 470 '''yield every hg repository under path, always recursively.
468 471 The recurse flag will only control recursion into repo working dirs'''
469 472 def errhandler(err):
470 473 if err.filename == path:
471 474 raise err
472 475 samestat = getattr(os.path, 'samestat', None)
473 476 if followsym and samestat is not None:
474 477 def adddir(dirlst, dirname):
475 478 match = False
476 479 dirstat = os.stat(dirname)
477 480 for lstdirstat in dirlst:
478 481 if samestat(dirstat, lstdirstat):
479 482 match = True
480 483 break
481 484 if not match:
482 485 dirlst.append(dirstat)
483 486 return not match
484 487 else:
485 488 followsym = False
486 489
487 490 if (seen_dirs is None) and followsym:
488 491 seen_dirs = []
489 492 adddir(seen_dirs, path)
490 493 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
491 494 dirs.sort()
492 495 if '.hg' in dirs:
493 496 yield root # found a repository
494 497 qroot = os.path.join(root, '.hg', 'patches')
495 498 if os.path.isdir(os.path.join(qroot, '.hg')):
496 499 yield qroot # we have a patch queue repo here
497 500 if recurse:
498 501 # avoid recursing inside the .hg directory
499 502 dirs.remove('.hg')
500 503 else:
501 504 dirs[:] = [] # don't descend further
502 505 elif followsym:
503 506 newdirs = []
504 507 for d in dirs:
505 508 fname = os.path.join(root, d)
506 509 if adddir(seen_dirs, fname):
507 510 if os.path.islink(fname):
508 511 for hgname in walkrepos(fname, True, seen_dirs):
509 512 yield hgname
510 513 else:
511 514 newdirs.append(d)
512 515 dirs[:] = newdirs
513 516
514 517 def osrcpath():
515 518 '''return default os-specific hgrc search path'''
516 519 path = systemrcpath()
517 520 path.extend(userrcpath())
518 521 path = [os.path.normpath(f) for f in path]
519 522 return path
520 523
521 524 _rcpath = None
522 525
523 526 def rcpath():
524 527 '''return hgrc search path. if env var HGRCPATH is set, use it.
525 528 for each item in path, if directory, use files ending in .rc,
526 529 else use item.
527 530 make HGRCPATH empty to only look in .hg/hgrc of current repo.
528 531 if no HGRCPATH, use default os-specific path.'''
529 532 global _rcpath
530 533 if _rcpath is None:
531 534 if 'HGRCPATH' in os.environ:
532 535 _rcpath = []
533 536 for p in os.environ['HGRCPATH'].split(os.pathsep):
534 537 if not p:
535 538 continue
536 539 p = util.expandpath(p)
537 540 if os.path.isdir(p):
538 541 for f, kind in osutil.listdir(p):
539 542 if f.endswith('.rc'):
540 543 _rcpath.append(os.path.join(p, f))
541 544 else:
542 545 _rcpath.append(p)
543 546 else:
544 547 _rcpath = osrcpath()
545 548 return _rcpath
546 549
547 550 def revsingle(repo, revspec, default='.'):
548 551 if not revspec:
549 552 return repo[default]
550 553
551 554 l = revrange(repo, [revspec])
552 555 if len(l) < 1:
553 556 raise util.Abort(_('empty revision set'))
554 557 return repo[l[-1]]
555 558
556 559 def revpair(repo, revs):
557 560 if not revs:
558 561 return repo.dirstate.p1(), None
559 562
560 563 l = revrange(repo, revs)
561 564
562 565 if len(l) == 0:
563 566 if revs:
564 567 raise util.Abort(_('empty revision range'))
565 568 return repo.dirstate.p1(), None
566 569
567 570 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
568 571 return repo.lookup(l[0]), None
569 572
570 573 return repo.lookup(l[0]), repo.lookup(l[-1])
571 574
572 575 _revrangesep = ':'
573 576
574 577 def revrange(repo, revs):
575 578 """Yield revision as strings from a list of revision specifications."""
576 579
577 580 def revfix(repo, val, defval):
578 581 if not val and val != 0 and defval is not None:
579 582 return defval
580 583 return repo[val].rev()
581 584
582 585 seen, l = set(), []
583 586 for spec in revs:
584 587 if l and not seen:
585 588 seen = set(l)
586 589 # attempt to parse old-style ranges first to deal with
587 590 # things like old-tag which contain query metacharacters
588 591 try:
589 592 if isinstance(spec, int):
590 593 seen.add(spec)
591 594 l.append(spec)
592 595 continue
593 596
594 597 if _revrangesep in spec:
595 598 start, end = spec.split(_revrangesep, 1)
596 599 start = revfix(repo, start, 0)
597 600 end = revfix(repo, end, len(repo) - 1)
598 601 if end == nullrev and start <= 0:
599 602 start = nullrev
600 603 rangeiter = repo.changelog.revs(start, end)
601 604 if not seen and not l:
602 605 # by far the most common case: revs = ["-1:0"]
603 606 l = list(rangeiter)
604 607 # defer syncing seen until next iteration
605 608 continue
606 609 newrevs = set(rangeiter)
607 610 if seen:
608 611 newrevs.difference_update(seen)
609 612 seen.update(newrevs)
610 613 else:
611 614 seen = newrevs
612 615 l.extend(sorted(newrevs, reverse=start > end))
613 616 continue
614 617 elif spec and spec in repo: # single unquoted rev
615 618 rev = revfix(repo, spec, None)
616 619 if rev in seen:
617 620 continue
618 621 seen.add(rev)
619 622 l.append(rev)
620 623 continue
621 624 except error.RepoLookupError:
622 625 pass
623 626
624 627 # fall through to new-style queries if old-style fails
625 628 m = revset.match(repo.ui, spec)
626 629 dl = [r for r in m(repo, list(repo)) if r not in seen]
627 630 l.extend(dl)
628 631 seen.update(dl)
629 632
630 633 return l
631 634
632 635 def expandpats(pats):
633 636 if not util.expandglobs:
634 637 return list(pats)
635 638 ret = []
636 639 for p in pats:
637 640 kind, name = matchmod._patsplit(p, None)
638 641 if kind is None:
639 642 try:
640 643 globbed = glob.glob(name)
641 644 except re.error:
642 645 globbed = [name]
643 646 if globbed:
644 647 ret.extend(globbed)
645 648 continue
646 649 ret.append(p)
647 650 return ret
648 651
649 652 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
650 653 if pats == ("",):
651 654 pats = []
652 655 if not globbed and default == 'relpath':
653 656 pats = expandpats(pats or [])
654 657
655 658 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
656 659 default)
657 660 def badfn(f, msg):
658 661 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
659 662 m.bad = badfn
660 663 return m, pats
661 664
662 665 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
663 666 return matchandpats(ctx, pats, opts, globbed, default)[0]
664 667
665 668 def matchall(repo):
666 669 return matchmod.always(repo.root, repo.getcwd())
667 670
668 671 def matchfiles(repo, files):
669 672 return matchmod.exact(repo.root, repo.getcwd(), files)
670 673
671 674 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
672 675 if dry_run is None:
673 676 dry_run = opts.get('dry_run')
674 677 if similarity is None:
675 678 similarity = float(opts.get('similarity') or 0)
676 679 # we'd use status here, except handling of symlinks and ignore is tricky
677 680 added, unknown, deleted, removed = [], [], [], []
678 681 audit_path = pathauditor(repo.root)
679 682 m = match(repo[None], pats, opts)
680 683 rejected = []
681 684 m.bad = lambda x, y: rejected.append(x)
682 685
683 686 ctx = repo[None]
684 687 dirstate = repo.dirstate
685 688 walkresults = dirstate.walk(m, sorted(ctx.substate), True, False)
686 689 for abs, st in walkresults.iteritems():
687 690 dstate = dirstate[abs]
688 691 if dstate == '?' and audit_path.check(abs):
689 692 unknown.append(abs)
690 693 elif dstate != 'r' and not st:
691 694 deleted.append(abs)
692 695 # for finding renames
693 696 elif dstate == 'r':
694 697 removed.append(abs)
695 698 elif dstate == 'a':
696 699 added.append(abs)
697 700
698 701 unknownset = set(unknown)
699 702 toprint = unknownset.copy()
700 703 toprint.update(deleted)
701 704 for abs in sorted(toprint):
702 705 if repo.ui.verbose or not m.exact(abs):
703 706 rel = m.rel(abs)
704 707 if abs in unknownset:
705 708 status = _('adding %s\n') % ((pats and rel) or abs)
706 709 else:
707 710 status = _('removing %s\n') % ((pats and rel) or abs)
708 711 repo.ui.status(status)
709 712
710 713 copies = {}
711 714 if similarity > 0:
712 715 for old, new, score in similar.findrenames(repo,
713 716 added + unknown, removed + deleted, similarity):
714 717 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
715 718 repo.ui.status(_('recording removal of %s as rename to %s '
716 719 '(%d%% similar)\n') %
717 720 (m.rel(old), m.rel(new), score * 100))
718 721 copies[new] = old
719 722
720 723 if not dry_run:
721 724 wctx = repo[None]
722 725 wlock = repo.wlock()
723 726 try:
724 727 wctx.forget(deleted)
725 728 wctx.add(unknown)
726 729 for new, old in copies.iteritems():
727 730 wctx.copy(old, new)
728 731 finally:
729 732 wlock.release()
730 733
731 734 for f in rejected:
732 735 if f in m.files():
733 736 return 1
734 737 return 0
735 738
736 739 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
737 740 """Update the dirstate to reflect the intent of copying src to dst. For
738 741 different reasons it might not end with dst being marked as copied from src.
739 742 """
740 743 origsrc = repo.dirstate.copied(src) or src
741 744 if dst == origsrc: # copying back a copy?
742 745 if repo.dirstate[dst] not in 'mn' and not dryrun:
743 746 repo.dirstate.normallookup(dst)
744 747 else:
745 748 if repo.dirstate[origsrc] == 'a' and origsrc == src:
746 749 if not ui.quiet:
747 750 ui.warn(_("%s has not been committed yet, so no copy "
748 751 "data will be stored for %s.\n")
749 752 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
750 753 if repo.dirstate[dst] in '?r' and not dryrun:
751 754 wctx.add([dst])
752 755 elif not dryrun:
753 756 wctx.copy(origsrc, dst)
754 757
755 758 def readrequires(opener, supported):
756 759 '''Reads and parses .hg/requires and checks if all entries found
757 760 are in the list of supported features.'''
758 761 requirements = set(opener.read("requires").splitlines())
759 762 missings = []
760 763 for r in requirements:
761 764 if r not in supported:
762 765 if not r or not r[0].isalnum():
763 766 raise error.RequirementError(_(".hg/requires file is corrupt"))
764 767 missings.append(r)
765 768 missings.sort()
766 769 if missings:
767 770 raise error.RequirementError(
768 771 _("unknown repository format: requires features '%s' (upgrade "
769 772 "Mercurial)") % "', '".join(missings))
770 773 return requirements
771 774
772 775 class filecacheentry(object):
773 776 def __init__(self, path, stat=True):
774 777 self.path = path
775 778 self.cachestat = None
776 779 self._cacheable = None
777 780
778 781 if stat:
779 782 self.cachestat = filecacheentry.stat(self.path)
780 783
781 784 if self.cachestat:
782 785 self._cacheable = self.cachestat.cacheable()
783 786 else:
784 787 # None means we don't know yet
785 788 self._cacheable = None
786 789
787 790 def refresh(self):
788 791 if self.cacheable():
789 792 self.cachestat = filecacheentry.stat(self.path)
790 793
791 794 def cacheable(self):
792 795 if self._cacheable is not None:
793 796 return self._cacheable
794 797
795 798 # we don't know yet, assume it is for now
796 799 return True
797 800
798 801 def changed(self):
799 802 # no point in going further if we can't cache it
800 803 if not self.cacheable():
801 804 return True
802 805
803 806 newstat = filecacheentry.stat(self.path)
804 807
805 808 # we may not know if it's cacheable yet, check again now
806 809 if newstat and self._cacheable is None:
807 810 self._cacheable = newstat.cacheable()
808 811
809 812 # check again
810 813 if not self._cacheable:
811 814 return True
812 815
813 816 if self.cachestat != newstat:
814 817 self.cachestat = newstat
815 818 return True
816 819 else:
817 820 return False
818 821
819 822 @staticmethod
820 823 def stat(path):
821 824 try:
822 825 return util.cachestat(path)
823 826 except OSError, e:
824 827 if e.errno != errno.ENOENT:
825 828 raise
826 829
827 830 class filecache(object):
828 831 '''A property like decorator that tracks a file under .hg/ for updates.
829 832
830 833 Records stat info when called in _filecache.
831 834
832 835 On subsequent calls, compares old stat info with new info, and recreates
833 836 the object when needed, updating the new stat info in _filecache.
834 837
835 838 Mercurial either atomic renames or appends for files under .hg,
836 839 so to ensure the cache is reliable we need the filesystem to be able
837 840 to tell us if a file has been replaced. If it can't, we fallback to
838 841 recreating the object on every call (essentially the same behaviour as
839 842 propertycache).'''
840 843 def __init__(self, path):
841 844 self.path = path
842 845
843 846 def join(self, obj, fname):
844 847 """Used to compute the runtime path of the cached file.
845 848
846 849 Users should subclass filecache and provide their own version of this
847 850 function to call the appropriate join function on 'obj' (an instance
848 851 of the class that its member function was decorated).
849 852 """
850 853 return obj.join(fname)
851 854
852 855 def __call__(self, func):
853 856 self.func = func
854 857 self.name = func.__name__
855 858 return self
856 859
857 860 def __get__(self, obj, type=None):
858 861 # do we need to check if the file changed?
859 862 if self.name in obj.__dict__:
860 863 assert self.name in obj._filecache, self.name
861 864 return obj.__dict__[self.name]
862 865
863 866 entry = obj._filecache.get(self.name)
864 867
865 868 if entry:
866 869 if entry.changed():
867 870 entry.obj = self.func(obj)
868 871 else:
869 872 path = self.join(obj, self.path)
870 873
871 874 # We stat -before- creating the object so our cache doesn't lie if
872 875 # a writer modified between the time we read and stat
873 876 entry = filecacheentry(path)
874 877 entry.obj = self.func(obj)
875 878
876 879 obj._filecache[self.name] = entry
877 880
878 881 obj.__dict__[self.name] = entry.obj
879 882 return entry.obj
880 883
881 884 def __set__(self, obj, value):
882 885 if self.name not in obj._filecache:
883 886 # we add an entry for the missing value because X in __dict__
884 887 # implies X in _filecache
885 888 ce = filecacheentry(self.join(obj, self.path), False)
886 889 obj._filecache[self.name] = ce
887 890 else:
888 891 ce = obj._filecache[self.name]
889 892
890 893 ce.obj = value # update cached copy
891 894 obj.__dict__[self.name] = value # update copy returned by obj.x
892 895
893 896 def __delete__(self, obj):
894 897 try:
895 898 del obj.__dict__[self.name]
896 899 except KeyError:
897 900 raise AttributeError(self.name)
898 901
899 902 class dirs(object):
900 903 '''a multiset of directory names from a dirstate or manifest'''
901 904
902 905 def __init__(self, map, skip=None):
903 906 self._dirs = {}
904 907 addpath = self.addpath
905 908 if util.safehasattr(map, 'iteritems') and skip is not None:
906 909 for f, s in map.iteritems():
907 910 if s[0] != skip:
908 911 addpath(f)
909 912 else:
910 913 for f in map:
911 914 addpath(f)
912 915
913 916 def addpath(self, path):
914 917 dirs = self._dirs
915 918 for base in finddirs(path):
916 919 if base in dirs:
917 920 dirs[base] += 1
918 921 return
919 922 dirs[base] = 1
920 923
921 924 def delpath(self, path):
922 925 dirs = self._dirs
923 926 for base in finddirs(path):
924 927 if dirs[base] > 1:
925 928 dirs[base] -= 1
926 929 return
927 930 del dirs[base]
928 931
929 932 def __iter__(self):
930 933 return self._dirs.iterkeys()
931 934
932 935 def __contains__(self, d):
933 936 return d in self._dirs
934 937
935 938 if util.safehasattr(parsers, 'dirs'):
936 939 dirs = parsers.dirs
937 940
938 941 def finddirs(path):
939 942 pos = path.rfind('/')
940 943 while pos != -1:
941 944 yield path[:pos]
942 945 pos = path.rfind('/', 0, pos)
General Comments 0
You need to be logged in to leave comments. Login now