localrepo: fix unpushable repos when using bookmarks (issue3317)...
Michael Bacarella
r16243:b9c43023 stable
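The commit message body is truncated above ("..."), but the change itself is visible in the hunk below: in _writejournal(), the journal copy of the bookmarks file is now made by reading and writing through the repository opener rather than by copying the file directly. A minimal before/after sketch, using only names that appear in the diff:

    # removed (old lines 775-779):
    bkname = self.join('bookmarks')
    if os.path.exists(bkname):
        util.copyfile(bkname, self.join('journal.bookmarks'))
    else:
        self.opener.write('journal.bookmarks', '')

    # added (new lines 775-780):
    try:
        bk = self.opener.read("bookmarks")
    except IOError:
        bk = ""
    self.opener.write("journal.bookmarks", bk)

The new form treats a missing bookmarks file as empty via the IOError path, instead of pairing an os.path.exists() check with util.copyfile(), so the journal copy always goes through the opener.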
@@ -1,2324 +1,2325 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class storecache(filecache):
23 23 """filecache for files in the store"""
24 24 def join(self, obj, fname):
25 25 return obj.sjoin(fname)
26 26
27 27 class localrepository(repo.repository):
28 28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 29 'known', 'getbundle'))
30 30 supportedformats = set(('revlogv1', 'generaldelta'))
31 31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 32 'dotencode'))
33 33
34 34 def __init__(self, baseui, path=None, create=False):
35 35 repo.repository.__init__(self)
36 36 self.root = os.path.realpath(util.expandpath(path))
37 37 self.path = os.path.join(self.root, ".hg")
38 38 self.origroot = path
39 39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 40 self.opener = scmutil.opener(self.path)
41 41 self.wopener = scmutil.opener(self.root)
42 42 self.baseui = baseui
43 43 self.ui = baseui.copy()
44 44 self._dirtyphases = False
45 45 # A list of callbacks to shape the phase if no data was found.
46 46 # Callbacks are in the form: func(repo, roots) --> processed root.
47 47 # This list is to be filled by extensions during repo setup.
48 48 self._phasedefaults = []
49 49
50 50 try:
51 51 self.ui.readconfig(self.join("hgrc"), self.root)
52 52 extensions.loadall(self.ui)
53 53 except IOError:
54 54 pass
55 55
56 56 if not os.path.isdir(self.path):
57 57 if create:
58 58 if not os.path.exists(path):
59 59 util.makedirs(path)
60 60 util.makedir(self.path, notindexed=True)
61 61 requirements = ["revlogv1"]
62 62 if self.ui.configbool('format', 'usestore', True):
63 63 os.mkdir(os.path.join(self.path, "store"))
64 64 requirements.append("store")
65 65 if self.ui.configbool('format', 'usefncache', True):
66 66 requirements.append("fncache")
67 67 if self.ui.configbool('format', 'dotencode', True):
68 68 requirements.append('dotencode')
69 69 # create an invalid changelog
70 70 self.opener.append(
71 71 "00changelog.i",
72 72 '\0\0\0\2' # represents revlogv2
73 73 ' dummy changelog to prevent using the old repo layout'
74 74 )
75 75 if self.ui.configbool('format', 'generaldelta', False):
76 76 requirements.append("generaldelta")
77 77 requirements = set(requirements)
78 78 else:
79 79 raise error.RepoError(_("repository %s not found") % path)
80 80 elif create:
81 81 raise error.RepoError(_("repository %s already exists") % path)
82 82 else:
83 83 try:
84 84 requirements = scmutil.readrequires(self.opener, self.supported)
85 85 except IOError, inst:
86 86 if inst.errno != errno.ENOENT:
87 87 raise
88 88 requirements = set()
89 89
90 90 self.sharedpath = self.path
91 91 try:
92 92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
93 93 if not os.path.exists(s):
94 94 raise error.RepoError(
95 95 _('.hg/sharedpath points to nonexistent directory %s') % s)
96 96 self.sharedpath = s
97 97 except IOError, inst:
98 98 if inst.errno != errno.ENOENT:
99 99 raise
100 100
101 101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
102 102 self.spath = self.store.path
103 103 self.sopener = self.store.opener
104 104 self.sjoin = self.store.join
105 105 self.opener.createmode = self.store.createmode
106 106 self._applyrequirements(requirements)
107 107 if create:
108 108 self._writerequirements()
109 109
110 110
111 111 self._branchcache = None
112 112 self._branchcachetip = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 # A cache for various files under .hg/ that tracks file changes,
118 118 # (used by the filecache decorator)
119 119 #
120 120 # Maps a property name to its util.filecacheentry
121 121 self._filecache = {}
122 122
123 123 def _applyrequirements(self, requirements):
124 124 self.requirements = requirements
125 125 openerreqs = set(('revlogv1', 'generaldelta'))
126 126 self.sopener.options = dict((r, 1) for r in requirements
127 127 if r in openerreqs)
128 128
129 129 def _writerequirements(self):
130 130 reqfile = self.opener("requires", "w")
131 131 for r in self.requirements:
132 132 reqfile.write("%s\n" % r)
133 133 reqfile.close()
134 134
135 135 def _checknested(self, path):
136 136 """Determine if path is a legal nested repository."""
137 137 if not path.startswith(self.root):
138 138 return False
139 139 subpath = path[len(self.root) + 1:]
140 140 normsubpath = util.pconvert(subpath)
141 141
142 142 # XXX: Checking against the current working copy is wrong in
143 143 # the sense that it can reject things like
144 144 #
145 145 # $ hg cat -r 10 sub/x.txt
146 146 #
147 147 # if sub/ is no longer a subrepository in the working copy
148 148 # parent revision.
149 149 #
150 150 # However, it can of course also allow things that would have
151 151 # been rejected before, such as the above cat command if sub/
152 152 # is a subrepository now, but was a normal directory before.
153 153 # The old path auditor would have rejected by mistake since it
154 154 # panics when it sees sub/.hg/.
155 155 #
156 156 # All in all, checking against the working copy seems sensible
157 157 # since we want to prevent access to nested repositories on
158 158 # the filesystem *now*.
159 159 ctx = self[None]
160 160 parts = util.splitpath(subpath)
161 161 while parts:
162 162 prefix = '/'.join(parts)
163 163 if prefix in ctx.substate:
164 164 if prefix == normsubpath:
165 165 return True
166 166 else:
167 167 sub = ctx.sub(prefix)
168 168 return sub.checknested(subpath[len(prefix) + 1:])
169 169 else:
170 170 parts.pop()
171 171 return False
172 172
173 173 @filecache('bookmarks')
174 174 def _bookmarks(self):
175 175 return bookmarks.read(self)
176 176
177 177 @filecache('bookmarks.current')
178 178 def _bookmarkcurrent(self):
179 179 return bookmarks.readcurrent(self)
180 180
181 181 def _writebookmarks(self, marks):
182 182 bookmarks.write(self)
183 183
184 184 @storecache('phaseroots')
185 185 def _phaseroots(self):
186 186 self._dirtyphases = False
187 187 phaseroots = phases.readroots(self)
188 188 phases.filterunknown(self, phaseroots)
189 189 return phaseroots
190 190
191 191 @propertycache
192 192 def _phaserev(self):
193 193 cache = [phases.public] * len(self)
194 194 for phase in phases.trackedphases:
195 195 roots = map(self.changelog.rev, self._phaseroots[phase])
196 196 if roots:
197 197 for rev in roots:
198 198 cache[rev] = phase
199 199 for rev in self.changelog.descendants(*roots):
200 200 cache[rev] = phase
201 201 return cache
202 202
203 203 @storecache('00changelog.i')
204 204 def changelog(self):
205 205 c = changelog.changelog(self.sopener)
206 206 if 'HG_PENDING' in os.environ:
207 207 p = os.environ['HG_PENDING']
208 208 if p.startswith(self.root):
209 209 c.readpending('00changelog.i.a')
210 210 return c
211 211
212 212 @storecache('00manifest.i')
213 213 def manifest(self):
214 214 return manifest.manifest(self.sopener)
215 215
216 216 @filecache('dirstate')
217 217 def dirstate(self):
218 218 warned = [0]
219 219 def validate(node):
220 220 try:
221 221 self.changelog.rev(node)
222 222 return node
223 223 except error.LookupError:
224 224 if not warned[0]:
225 225 warned[0] = True
226 226 self.ui.warn(_("warning: ignoring unknown"
227 227 " working parent %s!\n") % short(node))
228 228 return nullid
229 229
230 230 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231 231
232 232 def __getitem__(self, changeid):
233 233 if changeid is None:
234 234 return context.workingctx(self)
235 235 return context.changectx(self, changeid)
236 236
237 237 def __contains__(self, changeid):
238 238 try:
239 239 return bool(self.lookup(changeid))
240 240 except error.RepoLookupError:
241 241 return False
242 242
243 243 def __nonzero__(self):
244 244 return True
245 245
246 246 def __len__(self):
247 247 return len(self.changelog)
248 248
249 249 def __iter__(self):
250 250 for i in xrange(len(self)):
251 251 yield i
252 252
253 253 def revs(self, expr, *args):
254 254 '''Return a list of revisions matching the given revset'''
255 255 expr = revset.formatspec(expr, *args)
256 256 m = revset.match(None, expr)
257 257 return [r for r in m(self, range(len(self)))]
258 258
259 259 def set(self, expr, *args):
260 260 '''
261 261 Yield a context for each matching revision, after doing arg
262 262 replacement via revset.formatspec
263 263 '''
264 264 for r in self.revs(expr, *args):
265 265 yield self[r]
266 266
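    # [sketch, not part of localrepo.py: revs() and set() take a revset
    # template that is expanded via revset.formatspec, e.g. %d for an
    # integer and %s for a string argument]
    #   for ctx in repo.set('%d:%d and not merge()', 0, 10):
    #       repo.ui.note(ctx.hex() + '\n')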
267 267 def url(self):
268 268 return 'file:' + self.root
269 269
270 270 def hook(self, name, throw=False, **args):
271 271 return hook.hook(self.ui, self, name, throw, **args)
272 272
273 273 tag_disallowed = ':\r\n'
274 274
275 275 def _tag(self, names, node, message, local, user, date, extra={}):
276 276 if isinstance(names, str):
277 277 allchars = names
278 278 names = (names,)
279 279 else:
280 280 allchars = ''.join(names)
281 281 for c in self.tag_disallowed:
282 282 if c in allchars:
283 283 raise util.Abort(_('%r cannot be used in a tag name') % c)
284 284
285 285 branches = self.branchmap()
286 286 for name in names:
287 287 self.hook('pretag', throw=True, node=hex(node), tag=name,
288 288 local=local)
289 289 if name in branches:
290 290 self.ui.warn(_("warning: tag %s conflicts with existing"
291 291 " branch name\n") % name)
292 292
293 293 def writetags(fp, names, munge, prevtags):
294 294 fp.seek(0, 2)
295 295 if prevtags and prevtags[-1] != '\n':
296 296 fp.write('\n')
297 297 for name in names:
298 298 m = munge and munge(name) or name
299 299 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
300 300 old = self.tags().get(name, nullid)
301 301 fp.write('%s %s\n' % (hex(old), m))
302 302 fp.write('%s %s\n' % (hex(node), m))
303 303 fp.close()
304 304
305 305 prevtags = ''
306 306 if local:
307 307 try:
308 308 fp = self.opener('localtags', 'r+')
309 309 except IOError:
310 310 fp = self.opener('localtags', 'a')
311 311 else:
312 312 prevtags = fp.read()
313 313
314 314 # local tags are stored in the current charset
315 315 writetags(fp, names, None, prevtags)
316 316 for name in names:
317 317 self.hook('tag', node=hex(node), tag=name, local=local)
318 318 return
319 319
320 320 try:
321 321 fp = self.wfile('.hgtags', 'rb+')
322 322 except IOError, e:
323 323 if e.errno != errno.ENOENT:
324 324 raise
325 325 fp = self.wfile('.hgtags', 'ab')
326 326 else:
327 327 prevtags = fp.read()
328 328
329 329 # committed tags are stored in UTF-8
330 330 writetags(fp, names, encoding.fromlocal, prevtags)
331 331
332 332 fp.close()
333 333
334 334 self.invalidatecaches()
335 335
336 336 if '.hgtags' not in self.dirstate:
337 337 self[None].add(['.hgtags'])
338 338
339 339 m = matchmod.exact(self.root, '', ['.hgtags'])
340 340 tagnode = self.commit(message, user, date, extra=extra, match=m)
341 341
342 342 for name in names:
343 343 self.hook('tag', node=hex(node), tag=name, local=local)
344 344
345 345 return tagnode
346 346
347 347 def tag(self, names, node, message, local, user, date):
348 348 '''tag a revision with one or more symbolic names.
349 349
350 350 names is a list of strings or, when adding a single tag, names may be a
351 351 string.
352 352
353 353 if local is True, the tags are stored in a per-repository file.
354 354 otherwise, they are stored in the .hgtags file, and a new
355 355 changeset is committed with the change.
356 356
357 357 keyword arguments:
358 358
359 359 local: whether to store tags in non-version-controlled file
360 360 (default False)
361 361
362 362 message: commit message to use if committing
363 363
364 364 user: name of user to use if committing
365 365
366 366 date: date tuple to use if committing'''
367 367
368 368 if not local:
369 369 for x in self.status()[:5]:
370 370 if '.hgtags' in x:
371 371 raise util.Abort(_('working copy of .hgtags is changed '
372 372 '(please commit .hgtags manually)'))
373 373
374 374 self.tags() # instantiate the cache
375 375 self._tag(names, node, message, local, user, date)
376 376
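    # [sketch, not part of localrepo.py: a minimal caller of tag(),
    # matching the docstring above]
    #   repo.tag(['v1.0'], repo['tip'].node(), 'Added tag v1.0',
    #            local=False, user=None, date=None)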
377 377 @propertycache
378 378 def _tagscache(self):
379 379 '''Returns a tagscache object that contains various tag-related caches.'''
380 380
381 381 # This simplifies its cache management by having one decorated
382 382 # function (this one) and the rest simply fetch things from it.
383 383 class tagscache(object):
384 384 def __init__(self):
385 385 # These two define the set of tags for this repository. tags
386 386 # maps tag name to node; tagtypes maps tag name to 'global' or
387 387 # 'local'. (Global tags are defined by .hgtags across all
388 388 # heads, and local tags are defined in .hg/localtags.)
389 389 # They constitute the in-memory cache of tags.
390 390 self.tags = self.tagtypes = None
391 391
392 392 self.nodetagscache = self.tagslist = None
393 393
394 394 cache = tagscache()
395 395 cache.tags, cache.tagtypes = self._findtags()
396 396
397 397 return cache
398 398
399 399 def tags(self):
400 400 '''return a mapping of tag to node'''
401 401 return self._tagscache.tags
402 402
403 403 def _findtags(self):
404 404 '''Do the hard work of finding tags. Return a pair of dicts
405 405 (tags, tagtypes) where tags maps tag name to node, and tagtypes
406 406 maps tag name to a string like \'global\' or \'local\'.
407 407 Subclasses or extensions are free to add their own tags, but
408 408 should be aware that the returned dicts will be retained for the
409 409 duration of the localrepo object.'''
410 410
411 411 # XXX what tagtype should subclasses/extensions use? Currently
412 412 # mq and bookmarks add tags, but do not set the tagtype at all.
413 413 # Should each extension invent its own tag type? Should there
414 414 # be one tagtype for all such "virtual" tags? Or is the status
415 415 # quo fine?
416 416
417 417 alltags = {} # map tag name to (node, hist)
418 418 tagtypes = {}
419 419
420 420 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
421 421 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
422 422
423 423 # Build the return dicts. Have to re-encode tag names because
424 424 # the tags module always uses UTF-8 (in order not to lose info
425 425 # writing to the cache), but the rest of Mercurial wants them in
426 426 # local encoding.
427 427 tags = {}
428 428 for (name, (node, hist)) in alltags.iteritems():
429 429 if node != nullid:
430 430 try:
431 431 # ignore tags to unknown nodes
432 432 self.changelog.lookup(node)
433 433 tags[encoding.tolocal(name)] = node
434 434 except error.LookupError:
435 435 pass
436 436 tags['tip'] = self.changelog.tip()
437 437 tagtypes = dict([(encoding.tolocal(name), value)
438 438 for (name, value) in tagtypes.iteritems()])
439 439 return (tags, tagtypes)
440 440
441 441 def tagtype(self, tagname):
442 442 '''
443 443 return the type of the given tag. result can be:
444 444
445 445 'local' : a local tag
446 446 'global' : a global tag
447 447 None : tag does not exist
448 448 '''
449 449
450 450 return self._tagscache.tagtypes.get(tagname)
451 451
452 452 def tagslist(self):
453 453 '''return a list of tags ordered by revision'''
454 454 if not self._tagscache.tagslist:
455 455 l = []
456 456 for t, n in self.tags().iteritems():
457 457 r = self.changelog.rev(n)
458 458 l.append((r, t, n))
459 459 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
460 460
461 461 return self._tagscache.tagslist
462 462
463 463 def nodetags(self, node):
464 464 '''return the tags associated with a node'''
465 465 if not self._tagscache.nodetagscache:
466 466 nodetagscache = {}
467 467 for t, n in self.tags().iteritems():
468 468 nodetagscache.setdefault(n, []).append(t)
469 469 for tags in nodetagscache.itervalues():
470 470 tags.sort()
471 471 self._tagscache.nodetagscache = nodetagscache
472 472 return self._tagscache.nodetagscache.get(node, [])
473 473
474 474 def nodebookmarks(self, node):
475 475 marks = []
476 476 for bookmark, n in self._bookmarks.iteritems():
477 477 if n == node:
478 478 marks.append(bookmark)
479 479 return sorted(marks)
480 480
481 481 def _branchtags(self, partial, lrev):
482 482 # TODO: rename this function?
483 483 tiprev = len(self) - 1
484 484 if lrev != tiprev:
485 485 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
486 486 self._updatebranchcache(partial, ctxgen)
487 487 self._writebranchcache(partial, self.changelog.tip(), tiprev)
488 488
489 489 return partial
490 490
491 491 def updatebranchcache(self):
492 492 tip = self.changelog.tip()
493 493 if self._branchcache is not None and self._branchcachetip == tip:
494 494 return
495 495
496 496 oldtip = self._branchcachetip
497 497 self._branchcachetip = tip
498 498 if oldtip is None or oldtip not in self.changelog.nodemap:
499 499 partial, last, lrev = self._readbranchcache()
500 500 else:
501 501 lrev = self.changelog.rev(oldtip)
502 502 partial = self._branchcache
503 503
504 504 self._branchtags(partial, lrev)
505 505 # this private cache holds all heads (not just tips)
506 506 self._branchcache = partial
507 507
508 508 def branchmap(self):
509 509 '''returns a dictionary {branch: [branchheads]}'''
510 510 self.updatebranchcache()
511 511 return self._branchcache
512 512
513 513 def branchtags(self):
514 514 '''return a dict where branch names map to the tipmost head of
515 515 the branch; open heads come before closed'''
516 516 bt = {}
517 517 for bn, heads in self.branchmap().iteritems():
518 518 tip = heads[-1]
519 519 for h in reversed(heads):
520 520 if 'close' not in self.changelog.read(h)[5]:
521 521 tip = h
522 522 break
523 523 bt[bn] = tip
524 524 return bt
525 525
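    # [sketch, not part of localrepo.py: shapes of the two branch caches]
    #   repo.branchmap()  -> {'default': [headnode1, headnode2], ...}
    #   repo.branchtags() -> {'default': tipmost_open_head_node, ...}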
526 526 def _readbranchcache(self):
527 527 partial = {}
528 528 try:
529 529 f = self.opener("cache/branchheads")
530 530 lines = f.read().split('\n')
531 531 f.close()
532 532 except (IOError, OSError):
533 533 return {}, nullid, nullrev
534 534
535 535 try:
536 536 last, lrev = lines.pop(0).split(" ", 1)
537 537 last, lrev = bin(last), int(lrev)
538 538 if lrev >= len(self) or self[lrev].node() != last:
539 539 # invalidate the cache
540 540 raise ValueError('invalidating branch cache (tip differs)')
541 541 for l in lines:
542 542 if not l:
543 543 continue
544 544 node, label = l.split(" ", 1)
545 545 label = encoding.tolocal(label.strip())
546 546 partial.setdefault(label, []).append(bin(node))
547 547 except KeyboardInterrupt:
548 548 raise
549 549 except Exception, inst:
550 550 if self.ui.debugflag:
551 551 self.ui.warn(str(inst), '\n')
552 552 partial, last, lrev = {}, nullid, nullrev
553 553 return partial, last, lrev
554 554
555 555 def _writebranchcache(self, branches, tip, tiprev):
556 556 try:
557 557 f = self.opener("cache/branchheads", "w", atomictemp=True)
558 558 f.write("%s %s\n" % (hex(tip), tiprev))
559 559 for label, nodes in branches.iteritems():
560 560 for node in nodes:
561 561 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
562 562 f.close()
563 563 except (IOError, OSError):
564 564 pass
565 565
566 566 def _updatebranchcache(self, partial, ctxgen):
567 567 # collect new branch entries
568 568 newbranches = {}
569 569 for c in ctxgen:
570 570 newbranches.setdefault(c.branch(), []).append(c.node())
571 571 # if older branchheads are reachable from new ones, they aren't
572 572 # really branchheads. Note checking parents is insufficient:
573 573 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
574 574 for branch, newnodes in newbranches.iteritems():
575 575 bheads = partial.setdefault(branch, [])
576 576 bheads.extend(newnodes)
577 577 if len(bheads) <= 1:
578 578 continue
579 579 bheads = sorted(bheads, key=lambda x: self[x].rev())
580 580 # starting from tip means fewer passes over reachable
581 581 while newnodes:
582 582 latest = newnodes.pop()
583 583 if latest not in bheads:
584 584 continue
585 585 minbhrev = self[bheads[0]].node()
586 586 reachable = self.changelog.reachable(latest, minbhrev)
587 587 reachable.remove(latest)
588 588 if reachable:
589 589 bheads = [b for b in bheads if b not in reachable]
590 590 partial[branch] = bheads
591 591
592 592 def lookup(self, key):
593 593 if isinstance(key, int):
594 594 return self.changelog.node(key)
595 595 elif key == '.':
596 596 return self.dirstate.p1()
597 597 elif key == 'null':
598 598 return nullid
599 599 elif key == 'tip':
600 600 return self.changelog.tip()
601 601 n = self.changelog._match(key)
602 602 if n:
603 603 return n
604 604 if key in self._bookmarks:
605 605 return self._bookmarks[key]
606 606 if key in self.tags():
607 607 return self.tags()[key]
608 608 if key in self.branchtags():
609 609 return self.branchtags()[key]
610 610 n = self.changelog._partialmatch(key)
611 611 if n:
612 612 return n
613 613
614 614 # can't find key, check if it might have come from damaged dirstate
615 615 if key in self.dirstate.parents():
616 616 raise error.Abort(_("working directory has unknown parent '%s'!")
617 617 % short(key))
618 618 try:
619 619 if len(key) == 20:
620 620 key = hex(key)
621 621 except TypeError:
622 622 pass
623 623 raise error.RepoLookupError(_("unknown revision '%s'") % key)
624 624
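    # [sketch, not part of localrepo.py: lookup() tries, in order --
    # integer rev, '.', 'null', 'tip', exact node, bookmark, tag, branch
    # name, then unambiguous node prefix]
    #   repo.lookup('tip')      # 20-byte binary node of the tip
    #   repo.lookup('default')  # tipmost head of branch 'default'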
625 625 def lookupbranch(self, key, remote=None):
626 626 repo = remote or self
627 627 if key in repo.branchmap():
628 628 return key
629 629
630 630 repo = (remote and remote.local()) and remote or self
631 631 return repo[key].branch()
632 632
633 633 def known(self, nodes):
634 634 nm = self.changelog.nodemap
635 635 result = []
636 636 for n in nodes:
637 637 r = nm.get(n)
638 638 resp = not (r is None or self._phaserev[r] >= phases.secret)
639 639 result.append(resp)
640 640 return result
641 641
642 642 def local(self):
643 643 return self
644 644
645 645 def join(self, f):
646 646 return os.path.join(self.path, f)
647 647
648 648 def wjoin(self, f):
649 649 return os.path.join(self.root, f)
650 650
651 651 def file(self, f):
652 652 if f[0] == '/':
653 653 f = f[1:]
654 654 return filelog.filelog(self.sopener, f)
655 655
656 656 def changectx(self, changeid):
657 657 return self[changeid]
658 658
659 659 def parents(self, changeid=None):
660 660 '''get list of changectxs for parents of changeid'''
661 661 return self[changeid].parents()
662 662
663 663 def filectx(self, path, changeid=None, fileid=None):
664 664 """changeid can be a changeset revision, node, or tag.
665 665 fileid can be a file revision or node."""
666 666 return context.filectx(self, path, changeid, fileid)
667 667
668 668 def getcwd(self):
669 669 return self.dirstate.getcwd()
670 670
671 671 def pathto(self, f, cwd=None):
672 672 return self.dirstate.pathto(f, cwd)
673 673
674 674 def wfile(self, f, mode='r'):
675 675 return self.wopener(f, mode)
676 676
677 677 def _link(self, f):
678 678 return os.path.islink(self.wjoin(f))
679 679
680 680 def _loadfilter(self, filter):
681 681 if filter not in self.filterpats:
682 682 l = []
683 683 for pat, cmd in self.ui.configitems(filter):
684 684 if cmd == '!':
685 685 continue
686 686 mf = matchmod.match(self.root, '', [pat])
687 687 fn = None
688 688 params = cmd
689 689 for name, filterfn in self._datafilters.iteritems():
690 690 if cmd.startswith(name):
691 691 fn = filterfn
692 692 params = cmd[len(name):].lstrip()
693 693 break
694 694 if not fn:
695 695 fn = lambda s, c, **kwargs: util.filter(s, c)
696 696 # Wrap old filters not supporting keyword arguments
697 697 if not inspect.getargspec(fn)[2]:
698 698 oldfn = fn
699 699 fn = lambda s, c, **kwargs: oldfn(s, c)
700 700 l.append((mf, fn, params))
701 701 self.filterpats[filter] = l
702 702 return self.filterpats[filter]
703 703
704 704 def _filter(self, filterpats, filename, data):
705 705 for mf, fn, cmd in filterpats:
706 706 if mf(filename):
707 707 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
708 708 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
709 709 break
710 710
711 711 return data
712 712
713 713 @propertycache
714 714 def _encodefilterpats(self):
715 715 return self._loadfilter('encode')
716 716
717 717 @propertycache
718 718 def _decodefilterpats(self):
719 719 return self._loadfilter('decode')
720 720
721 721 def adddatafilter(self, name, filter):
722 722 self._datafilters[name] = filter
723 723
724 724 def wread(self, filename):
725 725 if self._link(filename):
726 726 data = os.readlink(self.wjoin(filename))
727 727 else:
728 728 data = self.wopener.read(filename)
729 729 return self._filter(self._encodefilterpats, filename, data)
730 730
731 731 def wwrite(self, filename, data, flags):
732 732 data = self._filter(self._decodefilterpats, filename, data)
733 733 if 'l' in flags:
734 734 self.wopener.symlink(data, filename)
735 735 else:
736 736 self.wopener.write(filename, data)
737 737 if 'x' in flags:
738 738 util.setflags(self.wjoin(filename), False, True)
739 739
740 740 def wwritedata(self, filename, data):
741 741 return self._filter(self._decodefilterpats, filename, data)
742 742
743 743 def transaction(self, desc):
744 744 tr = self._transref and self._transref() or None
745 745 if tr and tr.running():
746 746 return tr.nest()
747 747
748 748 # abort here if the journal already exists
749 749 if os.path.exists(self.sjoin("journal")):
750 750 raise error.RepoError(
751 751 _("abandoned transaction found - run hg recover"))
752 752
753 753 journalfiles = self._writejournal(desc)
754 754 renames = [(x, undoname(x)) for x in journalfiles]
755 755
756 756 tr = transaction.transaction(self.ui.warn, self.sopener,
757 757 self.sjoin("journal"),
758 758 aftertrans(renames),
759 759 self.store.createmode)
760 760 self._transref = weakref.ref(tr)
761 761 return tr
762 762
763 763 def _writejournal(self, desc):
764 764 # save dirstate for rollback
765 765 try:
766 766 ds = self.opener.read("dirstate")
767 767 except IOError:
768 768 ds = ""
769 769 self.opener.write("journal.dirstate", ds)
770 770 self.opener.write("journal.branch",
771 771 encoding.fromlocal(self.dirstate.branch()))
772 772 self.opener.write("journal.desc",
773 773 "%d\n%s\n" % (len(self), desc))
774 774
775 bkname = self.join('bookmarks')
776 if os.path.exists(bkname):
777 util.copyfile(bkname, self.join('journal.bookmarks'))
778 else:
779 self.opener.write('journal.bookmarks', '')
775 try:
776 bk = self.opener.read("bookmarks")
777 except IOError:
778 bk = ""
779 self.opener.write("journal.bookmarks", bk)
780
780 781 phasesname = self.sjoin('phaseroots')
781 782 if os.path.exists(phasesname):
782 783 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
783 784 else:
784 785 self.sopener.write('journal.phaseroots', '')
785 786
786 787 return (self.sjoin('journal'), self.join('journal.dirstate'),
787 788 self.join('journal.branch'), self.join('journal.desc'),
788 789 self.join('journal.bookmarks'),
789 790 self.sjoin('journal.phaseroots'))
790 791
791 792 def recover(self):
792 793 lock = self.lock()
793 794 try:
794 795 if os.path.exists(self.sjoin("journal")):
795 796 self.ui.status(_("rolling back interrupted transaction\n"))
796 797 transaction.rollback(self.sopener, self.sjoin("journal"),
797 798 self.ui.warn)
798 799 self.invalidate()
799 800 return True
800 801 else:
801 802 self.ui.warn(_("no interrupted transaction available\n"))
802 803 return False
803 804 finally:
804 805 lock.release()
805 806
806 807 def rollback(self, dryrun=False, force=False):
807 808 wlock = lock = None
808 809 try:
809 810 wlock = self.wlock()
810 811 lock = self.lock()
811 812 if os.path.exists(self.sjoin("undo")):
812 813 return self._rollback(dryrun, force)
813 814 else:
814 815 self.ui.warn(_("no rollback information available\n"))
815 816 return 1
816 817 finally:
817 818 release(lock, wlock)
818 819
819 820 def _rollback(self, dryrun, force):
820 821 ui = self.ui
821 822 try:
822 823 args = self.opener.read('undo.desc').splitlines()
823 824 (oldlen, desc, detail) = (int(args[0]), args[1], None)
824 825 if len(args) >= 3:
825 826 detail = args[2]
826 827 oldtip = oldlen - 1
827 828
828 829 if detail and ui.verbose:
829 830 msg = (_('repository tip rolled back to revision %s'
830 831 ' (undo %s: %s)\n')
831 832 % (oldtip, desc, detail))
832 833 else:
833 834 msg = (_('repository tip rolled back to revision %s'
834 835 ' (undo %s)\n')
835 836 % (oldtip, desc))
836 837 except IOError:
837 838 msg = _('rolling back unknown transaction\n')
838 839 desc = None
839 840
840 841 if not force and self['.'] != self['tip'] and desc == 'commit':
841 842 raise util.Abort(
842 843 _('rollback of last commit while not checked out '
843 844 'may lose data'), hint=_('use -f to force'))
844 845
845 846 ui.status(msg)
846 847 if dryrun:
847 848 return 0
848 849
849 850 parents = self.dirstate.parents()
850 851 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
851 852 if os.path.exists(self.join('undo.bookmarks')):
852 853 util.rename(self.join('undo.bookmarks'),
853 854 self.join('bookmarks'))
854 855 if os.path.exists(self.sjoin('undo.phaseroots')):
855 856 util.rename(self.sjoin('undo.phaseroots'),
856 857 self.sjoin('phaseroots'))
857 858 self.invalidate()
858 859
859 860 parentgone = (parents[0] not in self.changelog.nodemap or
860 861 parents[1] not in self.changelog.nodemap)
861 862 if parentgone:
862 863 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
863 864 try:
864 865 branch = self.opener.read('undo.branch')
865 866 self.dirstate.setbranch(branch)
866 867 except IOError:
867 868 ui.warn(_('named branch could not be reset: '
868 869 'current branch is still \'%s\'\n')
869 870 % self.dirstate.branch())
870 871
871 872 self.dirstate.invalidate()
872 873 parents = tuple([p.rev() for p in self.parents()])
873 874 if len(parents) > 1:
874 875 ui.status(_('working directory now based on '
875 876 'revisions %d and %d\n') % parents)
876 877 else:
877 878 ui.status(_('working directory now based on '
878 879 'revision %d\n') % parents)
879 880 self.destroyed()
880 881 return 0
881 882
882 883 def invalidatecaches(self):
883 884 def delcache(name):
884 885 try:
885 886 delattr(self, name)
886 887 except AttributeError:
887 888 pass
888 889
889 890 delcache('_tagscache')
890 891 delcache('_phaserev')
891 892
892 893 self._branchcache = None # in UTF-8
893 894 self._branchcachetip = None
894 895
895 896 def invalidatedirstate(self):
896 897 '''Invalidates the dirstate, causing the next call to dirstate
897 898 to check if it was modified since the last time it was read,
898 899 and rereading it if it has been.
899 900
900 901 This is different from dirstate.invalidate() in that it doesn't
901 902 always reread the dirstate. Use dirstate.invalidate() if you want to
902 903 explicitly read the dirstate again (i.e. restoring it to a previous
903 904 known good state).'''
904 905 if 'dirstate' in self.__dict__:
905 906 for k in self.dirstate._filecache:
906 907 try:
907 908 delattr(self.dirstate, k)
908 909 except AttributeError:
909 910 pass
910 911 delattr(self, 'dirstate')
911 912
912 913 def invalidate(self):
913 914 for k in self._filecache:
914 915 # dirstate is invalidated separately in invalidatedirstate()
915 916 if k == 'dirstate':
916 917 continue
917 918
918 919 try:
919 920 delattr(self, k)
920 921 except AttributeError:
921 922 pass
922 923 self.invalidatecaches()
923 924
924 925 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
925 926 try:
926 927 l = lock.lock(lockname, 0, releasefn, desc=desc)
927 928 except error.LockHeld, inst:
928 929 if not wait:
929 930 raise
930 931 self.ui.warn(_("waiting for lock on %s held by %r\n") %
931 932 (desc, inst.locker))
932 933 # default to 600 seconds timeout
933 934 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
934 935 releasefn, desc=desc)
935 936 if acquirefn:
936 937 acquirefn()
937 938 return l
938 939
939 940 def _afterlock(self, callback):
940 941 """add a callback to the current repository lock.
941 942
942 943 The callback will be executed on lock release."""
943 944 l = self._lockref and self._lockref()
944 945 if l:
945 946 l.postrelease.append(callback)
946 947
947 948 def lock(self, wait=True):
948 949 '''Lock the repository store (.hg/store) and return a weak reference
949 950 to the lock. Use this before modifying the store (e.g. committing or
950 951 stripping). If you are opening a transaction, get a lock as well.'''
951 952 l = self._lockref and self._lockref()
952 953 if l is not None and l.held:
953 954 l.lock()
954 955 return l
955 956
956 957 def unlock():
957 958 self.store.write()
958 959 if self._dirtyphases:
959 960 phases.writeroots(self)
960 961 self._dirtyphases = False
961 962 for k, ce in self._filecache.items():
962 963 if k == 'dirstate':
963 964 continue
964 965 ce.refresh()
965 966
966 967 l = self._lock(self.sjoin("lock"), wait, unlock,
967 968 self.invalidate, _('repository %s') % self.origroot)
968 969 self._lockref = weakref.ref(l)
969 970 return l
970 971
971 972 def wlock(self, wait=True):
972 973 '''Lock the non-store parts of the repository (everything under
973 974 .hg except .hg/store) and return a weak reference to the lock.
974 975 Use this before modifying files in .hg.'''
975 976 l = self._wlockref and self._wlockref()
976 977 if l is not None and l.held:
977 978 l.lock()
978 979 return l
979 980
980 981 def unlock():
981 982 self.dirstate.write()
982 983 ce = self._filecache.get('dirstate')
983 984 if ce:
984 985 ce.refresh()
985 986
986 987 l = self._lock(self.join("wlock"), wait, unlock,
987 988 self.invalidatedirstate, _('working directory of %s') %
988 989 self.origroot)
989 990 self._wlockref = weakref.ref(l)
990 991 return l
991 992
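    # [sketch, not part of localrepo.py: the usual lock ordering, as used
    # by rollback() above -- take wlock before lock, release in reverse
    # order in finally blocks]
    #   wlock = repo.wlock()
    #   try:
    #       lock = repo.lock()
    #       try:
    #           pass  # ...modify the store...
    #       finally:
    #           lock.release()
    #   finally:
    #       wlock.release()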
992 993 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
993 994 """
994 995 commit an individual file as part of a larger transaction
995 996 """
996 997
997 998 fname = fctx.path()
998 999 text = fctx.data()
999 1000 flog = self.file(fname)
1000 1001 fparent1 = manifest1.get(fname, nullid)
1001 1002 fparent2 = fparent2o = manifest2.get(fname, nullid)
1002 1003
1003 1004 meta = {}
1004 1005 copy = fctx.renamed()
1005 1006 if copy and copy[0] != fname:
1006 1007 # Mark the new revision of this file as a copy of another
1007 1008 # file. This copy data will effectively act as a parent
1008 1009 # of this new revision. If this is a merge, the first
1009 1010 # parent will be the nullid (meaning "look up the copy data")
1010 1011 # and the second one will be the other parent. For example:
1011 1012 #
1012 1013 # 0 --- 1 --- 3 rev1 changes file foo
1013 1014 # \ / rev2 renames foo to bar and changes it
1014 1015 # \- 2 -/ rev3 should have bar with all changes and
1015 1016 # should record that bar descends from
1016 1017 # bar in rev2 and foo in rev1
1017 1018 #
1018 1019 # this allows this merge to succeed:
1019 1020 #
1020 1021 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1021 1022 # \ / merging rev3 and rev4 should use bar@rev2
1022 1023 # \- 2 --- 4 as the merge base
1023 1024 #
1024 1025
1025 1026 cfname = copy[0]
1026 1027 crev = manifest1.get(cfname)
1027 1028 newfparent = fparent2
1028 1029
1029 1030 if manifest2: # branch merge
1030 1031 if fparent2 == nullid or crev is None: # copied on remote side
1031 1032 if cfname in manifest2:
1032 1033 crev = manifest2[cfname]
1033 1034 newfparent = fparent1
1034 1035
1035 1036 # find source in nearest ancestor if we've lost track
1036 1037 if not crev:
1037 1038 self.ui.debug(" %s: searching for copy revision for %s\n" %
1038 1039 (fname, cfname))
1039 1040 for ancestor in self[None].ancestors():
1040 1041 if cfname in ancestor:
1041 1042 crev = ancestor[cfname].filenode()
1042 1043 break
1043 1044
1044 1045 if crev:
1045 1046 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1046 1047 meta["copy"] = cfname
1047 1048 meta["copyrev"] = hex(crev)
1048 1049 fparent1, fparent2 = nullid, newfparent
1049 1050 else:
1050 1051 self.ui.warn(_("warning: can't find ancestor for '%s' "
1051 1052 "copied from '%s'!\n") % (fname, cfname))
1052 1053
1053 1054 elif fparent2 != nullid:
1054 1055 # is one parent an ancestor of the other?
1055 1056 fparentancestor = flog.ancestor(fparent1, fparent2)
1056 1057 if fparentancestor == fparent1:
1057 1058 fparent1, fparent2 = fparent2, nullid
1058 1059 elif fparentancestor == fparent2:
1059 1060 fparent2 = nullid
1060 1061
1061 1062 # is the file changed?
1062 1063 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1063 1064 changelist.append(fname)
1064 1065 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1065 1066
1066 1067 # are just the flags changed during merge?
1067 1068 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1068 1069 changelist.append(fname)
1069 1070
1070 1071 return fparent1
1071 1072
1072 1073 def commit(self, text="", user=None, date=None, match=None, force=False,
1073 1074 editor=False, extra={}):
1074 1075 """Add a new revision to current repository.
1075 1076
1076 1077 Revision information is gathered from the working directory;
1077 1078 match can be used to filter the committed files. If editor is
1078 1079 supplied, it is called to get a commit message.
1079 1080 """
1080 1081
1081 1082 def fail(f, msg):
1082 1083 raise util.Abort('%s: %s' % (f, msg))
1083 1084
1084 1085 if not match:
1085 1086 match = matchmod.always(self.root, '')
1086 1087
1087 1088 if not force:
1088 1089 vdirs = []
1089 1090 match.dir = vdirs.append
1090 1091 match.bad = fail
1091 1092
1092 1093 wlock = self.wlock()
1093 1094 try:
1094 1095 wctx = self[None]
1095 1096 merge = len(wctx.parents()) > 1
1096 1097
1097 1098 if (not force and merge and match and
1098 1099 (match.files() or match.anypats())):
1099 1100 raise util.Abort(_('cannot partially commit a merge '
1100 1101 '(do not specify files or patterns)'))
1101 1102
1102 1103 changes = self.status(match=match, clean=force)
1103 1104 if force:
1104 1105 changes[0].extend(changes[6]) # mq may commit unchanged files
1105 1106
1106 1107 # check subrepos
1107 1108 subs = []
1108 1109 removedsubs = set()
1109 1110 if '.hgsub' in wctx:
1110 1111 # only manage subrepos and .hgsubstate if .hgsub is present
1111 1112 for p in wctx.parents():
1112 1113 removedsubs.update(s for s in p.substate if match(s))
1113 1114 for s in wctx.substate:
1114 1115 removedsubs.discard(s)
1115 1116 if match(s) and wctx.sub(s).dirty():
1116 1117 subs.append(s)
1117 1118 if (subs or removedsubs):
1118 1119 if (not match('.hgsub') and
1119 1120 '.hgsub' in (wctx.modified() + wctx.added())):
1120 1121 raise util.Abort(
1121 1122 _("can't commit subrepos without .hgsub"))
1122 1123 if '.hgsubstate' not in changes[0]:
1123 1124 changes[0].insert(0, '.hgsubstate')
1124 1125 if '.hgsubstate' in changes[2]:
1125 1126 changes[2].remove('.hgsubstate')
1126 1127 elif '.hgsub' in changes[2]:
1127 1128 # clean up .hgsubstate when .hgsub is removed
1128 1129 if ('.hgsubstate' in wctx and
1129 1130 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1130 1131 changes[2].insert(0, '.hgsubstate')
1131 1132
1132 1133 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1133 1134 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1134 1135 if changedsubs:
1135 1136 raise util.Abort(_("uncommitted changes in subrepo %s")
1136 1137 % changedsubs[0],
1137 1138 hint=_("use --subrepos for recursive commit"))
1138 1139
1139 1140 # make sure all explicit patterns are matched
1140 1141 if not force and match.files():
1141 1142 matched = set(changes[0] + changes[1] + changes[2])
1142 1143
1143 1144 for f in match.files():
1144 1145 if f == '.' or f in matched or f in wctx.substate:
1145 1146 continue
1146 1147 if f in changes[3]: # missing
1147 1148 fail(f, _('file not found!'))
1148 1149 if f in vdirs: # visited directory
1149 1150 d = f + '/'
1150 1151 for mf in matched:
1151 1152 if mf.startswith(d):
1152 1153 break
1153 1154 else:
1154 1155 fail(f, _("no match under directory!"))
1155 1156 elif f not in self.dirstate:
1156 1157 fail(f, _("file not tracked!"))
1157 1158
1158 1159 if (not force and not extra.get("close") and not merge
1159 1160 and not (changes[0] or changes[1] or changes[2])
1160 1161 and wctx.branch() == wctx.p1().branch()):
1161 1162 return None
1162 1163
1163 1164 ms = mergemod.mergestate(self)
1164 1165 for f in changes[0]:
1165 1166 if f in ms and ms[f] == 'u':
1166 1167 raise util.Abort(_("unresolved merge conflicts "
1167 1168 "(see hg help resolve)"))
1168 1169
1169 1170 cctx = context.workingctx(self, text, user, date, extra, changes)
1170 1171 if editor:
1171 1172 cctx._text = editor(self, cctx, subs)
1172 1173 edited = (text != cctx._text)
1173 1174
1174 1175 # commit subs
1175 1176 if subs or removedsubs:
1176 1177 state = wctx.substate.copy()
1177 1178 for s in sorted(subs):
1178 1179 sub = wctx.sub(s)
1179 1180 self.ui.status(_('committing subrepository %s\n') %
1180 1181 subrepo.subrelpath(sub))
1181 1182 sr = sub.commit(cctx._text, user, date)
1182 1183 state[s] = (state[s][0], sr)
1183 1184 subrepo.writestate(self, state)
1184 1185
1185 1186 # Save commit message in case this transaction gets rolled back
1186 1187 # (e.g. by a pretxncommit hook). Leave the content alone on
1187 1188 # the assumption that the user will use the same editor again.
1188 1189 msgfn = self.savecommitmessage(cctx._text)
1189 1190
1190 1191 p1, p2 = self.dirstate.parents()
1191 1192 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1192 1193 try:
1193 1194 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1194 1195 ret = self.commitctx(cctx, True)
1195 1196 except:
1196 1197 if edited:
1197 1198 self.ui.write(
1198 1199 _('note: commit message saved in %s\n') % msgfn)
1199 1200 raise
1200 1201
1201 1202 # update bookmarks, dirstate and mergestate
1202 1203 bookmarks.update(self, p1, ret)
1203 1204 for f in changes[0] + changes[1]:
1204 1205 self.dirstate.normal(f)
1205 1206 for f in changes[2]:
1206 1207 self.dirstate.drop(f)
1207 1208 self.dirstate.setparents(ret)
1208 1209 ms.reset()
1209 1210 finally:
1210 1211 wlock.release()
1211 1212
1212 1213 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1213 1214 return ret
1214 1215
1215 1216 def commitctx(self, ctx, error=False):
1216 1217 """Add a new revision to current repository.
1217 1218 Revision information is passed via the context argument.
1218 1219 """
1219 1220
1220 1221 tr = lock = None
1221 1222 removed = list(ctx.removed())
1222 1223 p1, p2 = ctx.p1(), ctx.p2()
1223 1224 user = ctx.user()
1224 1225
1225 1226 lock = self.lock()
1226 1227 try:
1227 1228 tr = self.transaction("commit")
1228 1229 trp = weakref.proxy(tr)
1229 1230
1230 1231 if ctx.files():
1231 1232 m1 = p1.manifest().copy()
1232 1233 m2 = p2.manifest()
1233 1234
1234 1235 # check in files
1235 1236 new = {}
1236 1237 changed = []
1237 1238 linkrev = len(self)
1238 1239 for f in sorted(ctx.modified() + ctx.added()):
1239 1240 self.ui.note(f + "\n")
1240 1241 try:
1241 1242 fctx = ctx[f]
1242 1243 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1243 1244 changed)
1244 1245 m1.set(f, fctx.flags())
1245 1246 except OSError, inst:
1246 1247 self.ui.warn(_("trouble committing %s!\n") % f)
1247 1248 raise
1248 1249 except IOError, inst:
1249 1250 errcode = getattr(inst, 'errno', errno.ENOENT)
1250 1251 if error or errcode and errcode != errno.ENOENT:
1251 1252 self.ui.warn(_("trouble committing %s!\n") % f)
1252 1253 raise
1253 1254 else:
1254 1255 removed.append(f)
1255 1256
1256 1257 # update manifest
1257 1258 m1.update(new)
1258 1259 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1259 1260 drop = [f for f in removed if f in m1]
1260 1261 for f in drop:
1261 1262 del m1[f]
1262 1263 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1263 1264 p2.manifestnode(), (new, drop))
1264 1265 files = changed + removed
1265 1266 else:
1266 1267 mn = p1.manifestnode()
1267 1268 files = []
1268 1269
1269 1270 # update changelog
1270 1271 self.changelog.delayupdate()
1271 1272 n = self.changelog.add(mn, files, ctx.description(),
1272 1273 trp, p1.node(), p2.node(),
1273 1274 user, ctx.date(), ctx.extra().copy())
1274 1275 p = lambda: self.changelog.writepending() and self.root or ""
1275 1276 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1276 1277 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1277 1278 parent2=xp2, pending=p)
1278 1279 self.changelog.finalize(trp)
1279 1280 # set the new commit in its proper phase
1280 1281 targetphase = phases.newcommitphase(self.ui)
1281 1282 if targetphase:
1282 1283 # retracting the boundary does not alter parent changesets.
1283 1284 # if a parent has a higher phase, the resulting phase will
1284 1285 # be compliant anyway
1285 1286 #
1286 1287 # if minimal phase was 0 we don't need to retract anything
1287 1288 phases.retractboundary(self, targetphase, [n])
1288 1289 tr.close()
1289 1290 self.updatebranchcache()
1290 1291 return n
1291 1292 finally:
1292 1293 if tr:
1293 1294 tr.release()
1294 1295 lock.release()
1295 1296
1296 1297 def destroyed(self):
1297 1298 '''Inform the repository that nodes have been destroyed.
1298 1299 Intended for use by strip and rollback, so there's a common
1299 1300 place for anything that has to be done after destroying history.'''
1300 1301 # XXX it might be nice if we could take the list of destroyed
1301 1302 # nodes, but I don't see an easy way for rollback() to do that
1302 1303
1303 1304 # Ensure the persistent tag cache is updated. Doing it now
1304 1305 # means that the tag cache only has to worry about destroyed
1305 1306 # heads immediately after a strip/rollback. That in turn
1306 1307 # guarantees that "cachetip == currenttip" (comparing both rev
1307 1308 # and node) always means no nodes have been added or destroyed.
1308 1309
1309 1310 # XXX this is suboptimal when qrefresh'ing: we strip the current
1310 1311 # head, refresh the tag cache, then immediately add a new head.
1311 1312 # But I think doing it this way is necessary for the "instant
1312 1313 # tag cache retrieval" case to work.
1313 1314 self.invalidatecaches()
1314 1315
1315 1316 # Discard all cache entries to force reloading everything.
1316 1317 self._filecache.clear()
1317 1318
1318 1319 def walk(self, match, node=None):
1319 1320 '''
1320 1321 walk recursively through the directory tree or a given
1321 1322 changeset, finding all files matched by the match
1322 1323 function
1323 1324 '''
1324 1325 return self[node].walk(match)
1325 1326
1326 1327 def status(self, node1='.', node2=None, match=None,
1327 1328 ignored=False, clean=False, unknown=False,
1328 1329 listsubrepos=False):
1329 1330 """return status of files between two nodes or node and working directory
1330 1331
1331 1332 If node1 is None, use the first dirstate parent instead.
1332 1333 If node2 is None, compare node1 with working directory.
1333 1334 """
1334 1335
1335 1336 def mfmatches(ctx):
1336 1337 mf = ctx.manifest().copy()
1337 1338 for fn in mf.keys():
1338 1339 if not match(fn):
1339 1340 del mf[fn]
1340 1341 return mf
1341 1342
1342 1343 if isinstance(node1, context.changectx):
1343 1344 ctx1 = node1
1344 1345 else:
1345 1346 ctx1 = self[node1]
1346 1347 if isinstance(node2, context.changectx):
1347 1348 ctx2 = node2
1348 1349 else:
1349 1350 ctx2 = self[node2]
1350 1351
1351 1352 working = ctx2.rev() is None
1352 1353 parentworking = working and ctx1 == self['.']
1353 1354 match = match or matchmod.always(self.root, self.getcwd())
1354 1355 listignored, listclean, listunknown = ignored, clean, unknown
1355 1356
1356 1357 # load earliest manifest first for caching reasons
1357 1358 if not working and ctx2.rev() < ctx1.rev():
1358 1359 ctx2.manifest()
1359 1360
1360 1361 if not parentworking:
1361 1362 def bad(f, msg):
1362 1363 # 'f' may be a directory pattern from 'match.files()',
1363 1364 # so 'f not in ctx1' is not enough
1364 1365 if f not in ctx1 and f not in ctx1.dirs():
1365 1366 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1366 1367 match.bad = bad
1367 1368
1368 1369 if working: # we need to scan the working dir
1369 1370 subrepos = []
1370 1371 if '.hgsub' in self.dirstate:
1371 1372 subrepos = ctx2.substate.keys()
1372 1373 s = self.dirstate.status(match, subrepos, listignored,
1373 1374 listclean, listunknown)
1374 1375 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1375 1376
1376 1377 # check for any possibly clean files
1377 1378 if parentworking and cmp:
1378 1379 fixup = []
1379 1380 # do a full compare of any files that might have changed
1380 1381 for f in sorted(cmp):
1381 1382 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1382 1383 or ctx1[f].cmp(ctx2[f])):
1383 1384 modified.append(f)
1384 1385 else:
1385 1386 fixup.append(f)
1386 1387
1387 1388 # update dirstate for files that are actually clean
1388 1389 if fixup:
1389 1390 if listclean:
1390 1391 clean += fixup
1391 1392
1392 1393 try:
1393 1394 # updating the dirstate is optional
1394 1395 # so we don't wait on the lock
1395 1396 wlock = self.wlock(False)
1396 1397 try:
1397 1398 for f in fixup:
1398 1399 self.dirstate.normal(f)
1399 1400 finally:
1400 1401 wlock.release()
1401 1402 except error.LockError:
1402 1403 pass
1403 1404
1404 1405 if not parentworking:
1405 1406 mf1 = mfmatches(ctx1)
1406 1407 if working:
1407 1408 # we are comparing working dir against non-parent
1408 1409 # generate a pseudo-manifest for the working dir
1409 1410 mf2 = mfmatches(self['.'])
1410 1411 for f in cmp + modified + added:
1411 1412 mf2[f] = None
1412 1413 mf2.set(f, ctx2.flags(f))
1413 1414 for f in removed:
1414 1415 if f in mf2:
1415 1416 del mf2[f]
1416 1417 else:
1417 1418 # we are comparing two revisions
1418 1419 deleted, unknown, ignored = [], [], []
1419 1420 mf2 = mfmatches(ctx2)
1420 1421
1421 1422 modified, added, clean = [], [], []
1422 1423 for fn in mf2:
1423 1424 if fn in mf1:
1424 1425 if (fn not in deleted and
1425 1426 (mf1.flags(fn) != mf2.flags(fn) or
1426 1427 (mf1[fn] != mf2[fn] and
1427 1428 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1428 1429 modified.append(fn)
1429 1430 elif listclean:
1430 1431 clean.append(fn)
1431 1432 del mf1[fn]
1432 1433 elif fn not in deleted:
1433 1434 added.append(fn)
1434 1435 removed = mf1.keys()
1435 1436
1436 1437 if working and modified and not self.dirstate._checklink:
1437 1438 # Symlink placeholders may get non-symlink-like contents
1438 1439 # via user error or dereferencing by NFS or Samba servers,
1439 1440 # so we filter out any placeholders that don't look like a
1440 1441 # symlink
1441 1442 sane = []
1442 1443 for f in modified:
1443 1444 if ctx2.flags(f) == 'l':
1444 1445 d = ctx2[f].data()
1445 1446 if len(d) >= 1024 or '\n' in d or util.binary(d):
1446 1447 self.ui.debug('ignoring suspect symlink placeholder'
1447 1448 ' "%s"\n' % f)
1448 1449 continue
1449 1450 sane.append(f)
1450 1451 modified = sane
1451 1452
1452 1453 r = modified, added, removed, deleted, unknown, ignored, clean
1453 1454
1454 1455 if listsubrepos:
1455 1456 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1456 1457 if working:
1457 1458 rev2 = None
1458 1459 else:
1459 1460 rev2 = ctx2.substate[subpath][1]
1460 1461 try:
1461 1462 submatch = matchmod.narrowmatcher(subpath, match)
1462 1463 s = sub.status(rev2, match=submatch, ignored=listignored,
1463 1464 clean=listclean, unknown=listunknown,
1464 1465 listsubrepos=True)
1465 1466 for rfiles, sfiles in zip(r, s):
1466 1467 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1467 1468 except error.LookupError:
1468 1469 self.ui.status(_("skipping missing subrepository: %s\n")
1469 1470 % subpath)
1470 1471
1471 1472 for l in r:
1472 1473 l.sort()
1473 1474 return r
1474 1475
1475 1476 def heads(self, start=None):
1476 1477 heads = self.changelog.heads(start)
1477 1478 # sort the output in rev descending order
1478 1479 return sorted(heads, key=self.changelog.rev, reverse=True)
1479 1480
1480 1481 def branchheads(self, branch=None, start=None, closed=False):
1481 1482 '''return a (possibly filtered) list of heads for the given branch
1482 1483
1483 1484 Heads are returned in topological order, from newest to oldest.
1484 1485 If branch is None, use the dirstate branch.
1485 1486 If start is not None, return only heads reachable from start.
1486 1487 If closed is True, return heads that are marked as closed as well.
1487 1488 '''
1488 1489 if branch is None:
1489 1490 branch = self[None].branch()
1490 1491 branches = self.branchmap()
1491 1492 if branch not in branches:
1492 1493 return []
1493 1494 # the cache returns heads ordered lowest to highest
1494 1495 bheads = list(reversed(branches[branch]))
1495 1496 if start is not None:
1496 1497 # filter out the heads that cannot be reached from startrev
1497 1498 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1498 1499 bheads = [h for h in bheads if h in fbheads]
1499 1500 if not closed:
1500 1501 bheads = [h for h in bheads if
1501 1502 ('close' not in self.changelog.read(h)[5])]
1502 1503 return bheads
1503 1504
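    # [sketch, not part of localrepo.py: branchheads() usage per the
    # docstring above]
    #   repo.branchheads('default')              # open heads, newest first
    #   repo.branchheads('stable', closed=True)  # include closed heads too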
1504 1505 def branches(self, nodes):
1505 1506 if not nodes:
1506 1507 nodes = [self.changelog.tip()]
1507 1508 b = []
1508 1509 for n in nodes:
1509 1510 t = n
1510 1511 while True:
1511 1512 p = self.changelog.parents(n)
1512 1513 if p[1] != nullid or p[0] == nullid:
1513 1514 b.append((t, n, p[0], p[1]))
1514 1515 break
1515 1516 n = p[0]
1516 1517 return b
1517 1518
1518 1519 def between(self, pairs):
1519 1520 r = []
1520 1521
1521 1522 for top, bottom in pairs:
1522 1523 n, l, i = top, [], 0
1523 1524 f = 1
1524 1525
1525 1526 while n != bottom and n != nullid:
1526 1527 p = self.changelog.parents(n)[0]
1527 1528 if i == f:
1528 1529 l.append(n)
1529 1530 f = f * 2
1530 1531 n = p
1531 1532 i += 1
1532 1533
1533 1534 r.append(l)
1534 1535
1535 1536 return r
1536 1537
1537 1538 def pull(self, remote, heads=None, force=False):
1538 1539 lock = self.lock()
1539 1540 try:
1540 1541 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1541 1542 force=force)
1542 1543 common, fetch, rheads = tmp
1543 1544 if not fetch:
1544 1545 self.ui.status(_("no changes found\n"))
1545 1546 added = []
1546 1547 result = 0
1547 1548 else:
1548 1549 if heads is None and list(common) == [nullid]:
1549 1550 self.ui.status(_("requesting all changes\n"))
1550 1551 elif heads is None and remote.capable('changegroupsubset'):
1551 1552 # issue1320, avoid a race if remote changed after discovery
1552 1553 heads = rheads
1553 1554
1554 1555 if remote.capable('getbundle'):
1555 1556 cg = remote.getbundle('pull', common=common,
1556 1557 heads=heads or rheads)
1557 1558 elif heads is None:
1558 1559 cg = remote.changegroup(fetch, 'pull')
1559 1560 elif not remote.capable('changegroupsubset'):
1560 1561 raise util.Abort(_("partial pull cannot be done because "
1561 1562 "other repository doesn't support "
1562 1563 "changegroupsubset."))
1563 1564 else:
1564 1565 cg = remote.changegroupsubset(fetch, heads, 'pull')
1565 1566 clstart = len(self.changelog)
1566 1567 result = self.addchangegroup(cg, 'pull', remote.url())
1567 1568 clend = len(self.changelog)
1568 1569 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1569 1570
1570 1571 # compute target subset
1571 1572 if heads is None:
1572 1573 # We pulled everything possible
1573 1574 # sync on everything common
1574 1575 subset = common + added
1575 1576 else:
1576 1577 # We pulled a specific subset
1577 1578 # sync on this subset
1578 1579 subset = heads
1579 1580
1580 1581 # Get remote phases data from remote
1581 1582 remotephases = remote.listkeys('phases')
1582 1583 publishing = bool(remotephases.get('publishing', False))
1583 1584 if remotephases and not publishing:
1584 1585 # remote is new and non-publishing
1585 1586 pheads, _dr = phases.analyzeremotephases(self, subset,
1586 1587 remotephases)
1587 1588 phases.advanceboundary(self, phases.public, pheads)
1588 1589 phases.advanceboundary(self, phases.draft, subset)
1589 1590 else:
1590 1591 # Remote is old or publishing; all common changesets
1591 1592 # should be seen as public
1592 1593 phases.advanceboundary(self, phases.public, subset)
1593 1594 finally:
1594 1595 lock.release()
1595 1596
1596 1597 return result
1597 1598
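# Illustrative sketch, not part of localrepo.py: driving pull() through the
# internal API of this era. The paths are placeholders and error handling is
# omitted; hg.repository() of this period also opens remote repos by URL.
from mercurial import ui as uimod, hg

myui = uimod.ui()
repo = hg.repository(myui, '/path/to/local/repo')
other = hg.repository(myui, 'http://example.com/src')
result = repo.pull(other, heads=None, force=False)
# result is addchangegroup()'s return value: 0 when nothing was fetched,
# otherwise a head-count summary as documented in addchangegroup() below.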
1598 1599 def checkpush(self, force, revs):
1599 1600 """Extensions can override this function if additional checks have
1600 1601 to be performed before pushing, or call it if they override the
1601 1602 push command.
1602 1603 """
1603 1604 pass
1604 1605
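# Sketch of the override described in the docstring above: a hypothetical
# extension subclasses the repo class and vetoes pushes that do not name
# explicit revisions (the policy is invented for illustration).
from mercurial import util

def reposetup(ui, repo):
    if not repo.local():
        return
    class vetorepo(repo.__class__):
        def checkpush(self, force, revs):
            super(vetorepo, self).checkpush(force, revs)
            if revs is None and not force:
                raise util.Abort('pushing everything is disabled; use -r')
    repo.__class__ = vetorepo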
1605 1606 def push(self, remote, force=False, revs=None, newbranch=False):
1606 1607 '''Push outgoing changesets (limited by revs) from the current
1607 1608 repository to remote. Return an integer:
1608 1609 - None means nothing to push
1609 1610 - 0 means HTTP error
1610 1611 - 1 means we pushed and remote head count is unchanged *or*
1611 1612 we have outgoing changesets but refused to push
1612 1613 - other values as described by addchangegroup()
1613 1614 '''
1614 1615 # there are two ways to push to remote repo:
1615 1616 #
1616 1617 # addchangegroup assumes local user can lock remote
1617 1618 # repo (local filesystem, old ssh servers).
1618 1619 #
1619 1620 # unbundle assumes local user cannot lock remote repo (new ssh
1620 1621 # servers, http servers).
1621 1622
1622 1623 # get local lock as we might write phase data
1623 1624 locallock = self.lock()
1624 1625 try:
1625 1626 self.checkpush(force, revs)
1626 1627 lock = None
1627 1628 unbundle = remote.capable('unbundle')
1628 1629 if not unbundle:
1629 1630 lock = remote.lock()
1630 1631 try:
1631 1632 # discovery
1632 1633 fci = discovery.findcommonincoming
1633 1634 commoninc = fci(self, remote, force=force)
1634 1635 common, inc, remoteheads = commoninc
1635 1636 fco = discovery.findcommonoutgoing
1636 1637 outgoing = fco(self, remote, onlyheads=revs,
1637 1638 commoninc=commoninc, force=force)
1638 1639
1639 1640
1640 1641 if not outgoing.missing:
1641 1642 # nothing to push
1642 1643 scmutil.nochangesfound(self.ui, outgoing.excluded)
1643 1644 ret = None
1644 1645 else:
1645 1646 # something to push
1646 1647 if not force:
1647 1648 discovery.checkheads(self, remote, outgoing,
1648 1649 remoteheads, newbranch,
1649 1650 bool(inc))
1650 1651
1651 1652 # create a changegroup from local
1652 1653 if revs is None and not outgoing.excluded:
1653 1654 # push everything,
1654 1655 # use the fast path, no race possible on push
1655 1656 cg = self._changegroup(outgoing.missing, 'push')
1656 1657 else:
1657 1658 cg = self.getlocalbundle('push', outgoing)
1658 1659
1659 1660 # apply changegroup to remote
1660 1661 if unbundle:
1661 1662 # local repo finds heads on server, finds out what
1662 1663 # revs it must push. once revs transferred, if server
1663 1664 # finds it has different heads (someone else won
1664 1665 # commit/push race), server aborts.
1665 1666 if force:
1666 1667 remoteheads = ['force']
1667 1668 # ssh: return remote's addchangegroup()
1668 1669 # http: return remote's addchangegroup() or 0 for error
1669 1670 ret = remote.unbundle(cg, remoteheads, 'push')
1670 1671 else:
1671 1672 # we return an integer indicating remote head count change
1672 1673 ret = remote.addchangegroup(cg, 'push', self.url())
1673 1674
1674 1675 if ret:
1675 1676 # push succeeded; synchronize the target of the push
1676 1677 cheads = outgoing.missingheads
1677 1678 elif revs is None:
1678 1679 # The push of everything failed; synchronize all common
1679 1680 cheads = outgoing.commonheads
1680 1681 else:
1681 1682 # I want cheads = heads(::missingheads and ::commonheads)
1682 1683 # (missingheads is revs with secret changeset filtered out)
1683 1684 #
1684 1685 # This can be expressed as:
1685 1686 # cheads = ( (missingheads and ::commonheads)
1686 1687 # + (commonheads and ::missingheads)
1687 1688 # )
1688 1689 #
1689 1690 # while trying to push we already computed the following:
1690 1691 # common = (::commonheads)
1691 1692 # missing = ((commonheads::missingheads) - commonheads)
1692 1693 #
1693 1694 # We can pick:
1694 1695 # * missingheads part of common (::commonheads)
1695 1696 common = set(outgoing.common)
1696 1697 cheads = [node for node in revs if node in common]
1697 1698 # and
1698 1699 # * commonheads that are parents of missing changesets
1699 1700 revset = self.set('%ln and parents(roots(%ln))',
1700 1701 outgoing.commonheads,
1701 1702 outgoing.missing)
1702 1703 cheads.extend(c.node() for c in revset)
1703 1704 # even when we don't push, exchanging phase data is useful
1704 1705 remotephases = remote.listkeys('phases')
1705 1706 if not remotephases: # old server or public only repo
1706 1707 phases.advanceboundary(self, phases.public, cheads)
1707 1708 # don't push any phase data as there is nothing to push
1708 1709 else:
1709 1710 ana = phases.analyzeremotephases(self, cheads, remotephases)
1710 1711 pheads, droots = ana
1711 1712 ### Apply remote phase on local
1712 1713 if remotephases.get('publishing', False):
1713 1714 phases.advanceboundary(self, phases.public, cheads)
1714 1715 else: # publish = False
1715 1716 phases.advanceboundary(self, phases.public, pheads)
1716 1717 phases.advanceboundary(self, phases.draft, cheads)
1717 1718 ### Apply local phase on remote
1718 1719
1719 1720 # Get the list of all revs that are draft on remote but public here.
1720 1721 # XXX Beware that the revset breaks if droots is not strictly a
1721 1722 # XXX set of roots; we may want to ensure it is, but that is costly
1722 1723 outdated = self.set('heads((%ln::%ln) and public())',
1723 1724 droots, cheads)
1724 1725 for newremotehead in outdated:
1725 1726 r = remote.pushkey('phases',
1726 1727 newremotehead.hex(),
1727 1728 str(phases.draft),
1728 1729 str(phases.public))
1729 1730 if not r:
1730 1731 self.ui.warn(_('updating %s to public failed!\n')
1731 1732 % newremotehead)
1732 1733 finally:
1733 1734 if lock is not None:
1734 1735 lock.release()
1735 1736 finally:
1736 1737 locallock.release()
1737 1738
1738 1739 self.ui.debug("checking for updated bookmarks\n")
1739 1740 rb = remote.listkeys('bookmarks')
1740 1741 for k in rb.keys():
1741 1742 if k in self._bookmarks:
1742 1743 nr, nl = rb[k], hex(self._bookmarks[k])
1743 1744 if nr in self:
1744 1745 cr = self[nr]
1745 1746 cl = self[nl]
1746 1747 if cl in cr.descendants():
1747 1748 r = remote.pushkey('bookmarks', k, nr, nl)
1748 1749 if r:
1749 1750 self.ui.status(_("updating bookmark %s\n") % k)
1750 1751 else:
1751 1752 self.ui.warn(_('updating bookmark %s'
1752 1753 ' failed!\n') % k)
1753 1754
1754 1755 return ret
1755 1756
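# Sketch (assumed caller, with repo and other as in the pull sketch above):
# mapping push()'s documented return values onto user feedback.
ret = repo.push(other, force=False, revs=None, newbranch=False)
if ret is None:
    repo.ui.status('no outgoing changesets\n')
elif ret == 0:
    repo.ui.warn('push failed (HTTP error)\n')
# any other integer is addchangegroup()'s head-count summary; the bookmark
# synchronization above runs regardless of this value.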
1756 1757 def changegroupinfo(self, nodes, source):
1757 1758 if self.ui.verbose or source == 'bundle':
1758 1759 self.ui.status(_("%d changesets found\n") % len(nodes))
1759 1760 if self.ui.debugflag:
1760 1761 self.ui.debug("list of changesets:\n")
1761 1762 for node in nodes:
1762 1763 self.ui.debug("%s\n" % hex(node))
1763 1764
1764 1765 def changegroupsubset(self, bases, heads, source):
1765 1766 """Compute a changegroup consisting of all the nodes that are
1766 1767 descendants of any of the bases and ancestors of any of the heads.
1767 1768 Return a chunkbuffer object whose read() method will return
1768 1769 successive changegroup chunks.
1769 1770
1770 1771 It is fairly complex as determining which filenodes and which
1771 1772 manifest nodes need to be included for the changeset to be complete
1772 1773 is non-trivial.
1773 1774
1774 1775 Another wrinkle is doing the reverse, figuring out which changeset in
1775 1776 the changegroup a particular filenode or manifestnode belongs to.
1776 1777 """
1777 1778 cl = self.changelog
1778 1779 if not bases:
1779 1780 bases = [nullid]
1780 1781 csets, bases, heads = cl.nodesbetween(bases, heads)
1781 1782 # We assume that all ancestors of bases are known
1782 1783 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1783 1784 return self._changegroupsubset(common, csets, heads, source)
1784 1785
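# Sketch: materializing a full changegroup with changegroupsubset() and
# spooling it to disk (repo as in the earlier sketches, path a placeholder).
from mercurial.node import nullid

cg = repo.changegroupsubset([nullid], repo.heads(), 'bundle')
fp = open('/tmp/all.cg', 'wb')
while True:
    chunk = cg.read(4096)  # successive raw changegroup chunks
    if not chunk:
        break
    fp.write(chunk)
fp.close()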
1785 1786 def getlocalbundle(self, source, outgoing):
1786 1787 """Like getbundle, but taking a discovery.outgoing as an argument.
1787 1788
1788 1789 This is only implemented for local repos and reuses potentially
1789 1790 precomputed sets in outgoing."""
1790 1791 if not outgoing.missing:
1791 1792 return None
1792 1793 return self._changegroupsubset(outgoing.common,
1793 1794 outgoing.missing,
1794 1795 outgoing.missingheads,
1795 1796 source)
1796 1797
1797 1798 def getbundle(self, source, heads=None, common=None):
1798 1799 """Like changegroupsubset, but returns the set difference between the
1799 1800 ancestors of heads and the ancestors common.
1800 1801
1801 1802 If heads is None, use the local heads. If common is None, use [nullid].
1802 1803
1803 1804 The nodes in common might not all be known locally due to the way the
1804 1805 current discovery protocol works.
1805 1806 """
1806 1807 cl = self.changelog
1807 1808 if common:
1808 1809 nm = cl.nodemap
1809 1810 common = [n for n in common if n in nm]
1810 1811 else:
1811 1812 common = [nullid]
1812 1813 if not heads:
1813 1814 heads = cl.heads()
1814 1815 return self.getlocalbundle(source,
1815 1816 discovery.outgoing(cl, common, heads))
1816 1817
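# Sketch: getbundle() is a thin wrapper; the same call spelled out with an
# explicit discovery.outgoing object (repo as in the earlier sketches).
from mercurial import discovery
from mercurial.node import nullid

out = discovery.outgoing(repo.changelog, [nullid], repo.heads())
cg = repo.getlocalbundle('pull', out)  # equivalent to repo.getbundle('pull')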
1817 1818 def _changegroupsubset(self, commonrevs, csets, heads, source):
1818 1819
1819 1820 cl = self.changelog
1820 1821 mf = self.manifest
1821 1822 mfs = {} # needed manifests
1822 1823 fnodes = {} # needed file nodes
1823 1824 changedfiles = set()
1824 1825 fstate = ['', {}]
1825 1826 count = [0]
1826 1827
1827 1828 # can we go through the fast path?
1828 1829 heads.sort()
1829 1830 if heads == sorted(self.heads()):
1830 1831 return self._changegroup(csets, source)
1831 1832
1832 1833 # slow path
1833 1834 self.hook('preoutgoing', throw=True, source=source)
1834 1835 self.changegroupinfo(csets, source)
1835 1836
1836 1837 # filter any nodes that claim to be part of the known set
1837 1838 def prune(revlog, missing):
1838 1839 return [n for n in missing
1839 1840 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1840 1841
1841 1842 def lookup(revlog, x):
1842 1843 if revlog == cl:
1843 1844 c = cl.read(x)
1844 1845 changedfiles.update(c[3])
1845 1846 mfs.setdefault(c[0], x)
1846 1847 count[0] += 1
1847 1848 self.ui.progress(_('bundling'), count[0],
1848 1849 unit=_('changesets'), total=len(csets))
1849 1850 return x
1850 1851 elif revlog == mf:
1851 1852 clnode = mfs[x]
1852 1853 mdata = mf.readfast(x)
1853 1854 for f in changedfiles:
1854 1855 if f in mdata:
1855 1856 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1856 1857 count[0] += 1
1857 1858 self.ui.progress(_('bundling'), count[0],
1858 1859 unit=_('manifests'), total=len(mfs))
1859 1860 return mfs[x]
1860 1861 else:
1861 1862 self.ui.progress(
1862 1863 _('bundling'), count[0], item=fstate[0],
1863 1864 unit=_('files'), total=len(changedfiles))
1864 1865 return fstate[1][x]
1865 1866
1866 1867 bundler = changegroup.bundle10(lookup)
1867 1868 reorder = self.ui.config('bundle', 'reorder', 'auto')
1868 1869 if reorder == 'auto':
1869 1870 reorder = None
1870 1871 else:
1871 1872 reorder = util.parsebool(reorder)
1872 1873
1873 1874 def gengroup():
1874 1875 # Create a changenode group generator that will call our functions
1875 1876 # back to lookup the owning changenode and collect information.
1876 1877 for chunk in cl.group(csets, bundler, reorder=reorder):
1877 1878 yield chunk
1878 1879 self.ui.progress(_('bundling'), None)
1879 1880
1880 1881 # Create a generator for the manifestnodes that calls our lookup
1881 1882 # and data collection functions back.
1882 1883 count[0] = 0
1883 1884 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1884 1885 yield chunk
1885 1886 self.ui.progress(_('bundling'), None)
1886 1887
1887 1888 mfs.clear()
1888 1889
1889 1890 # Go through all our files in order sorted by name.
1890 1891 count[0] = 0
1891 1892 for fname in sorted(changedfiles):
1892 1893 filerevlog = self.file(fname)
1893 1894 if not len(filerevlog):
1894 1895 raise util.Abort(_("empty or missing revlog for %s") % fname)
1895 1896 fstate[0] = fname
1896 1897 fstate[1] = fnodes.pop(fname, {})
1897 1898
1898 1899 nodelist = prune(filerevlog, fstate[1])
1899 1900 if nodelist:
1900 1901 count[0] += 1
1901 1902 yield bundler.fileheader(fname)
1902 1903 for chunk in filerevlog.group(nodelist, bundler, reorder):
1903 1904 yield chunk
1904 1905
1905 1906 # Signal that no more groups are left.
1906 1907 yield bundler.close()
1907 1908 self.ui.progress(_('bundling'), None)
1908 1909
1909 1910 if csets:
1910 1911 self.hook('outgoing', node=hex(csets[0]), source=source)
1911 1912
1912 1913 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1913 1914
1914 1915 def changegroup(self, basenodes, source):
1915 1916 # to avoid a race we use changegroupsubset() (issue1320)
1916 1917 return self.changegroupsubset(basenodes, self.heads(), source)
1917 1918
1918 1919 def _changegroup(self, nodes, source):
1919 1920 """Compute the changegroup of all nodes that we have that a recipient
1920 1921 doesn't. Return a chunkbuffer object whose read() method will return
1921 1922 successive changegroup chunks.
1922 1923
1923 1924 This is much easier than the previous function as we can assume that
1924 1925 the recipient has any changenode we aren't sending them.
1925 1926
1926 1927 nodes is the set of nodes to send"""
1927 1928
1928 1929 cl = self.changelog
1929 1930 mf = self.manifest
1930 1931 mfs = {}
1931 1932 changedfiles = set()
1932 1933 fstate = ['']
1933 1934 count = [0]
1934 1935
1935 1936 self.hook('preoutgoing', throw=True, source=source)
1936 1937 self.changegroupinfo(nodes, source)
1937 1938
1938 1939 revset = set([cl.rev(n) for n in nodes])
1939 1940
1940 1941 def gennodelst(log):
1941 1942 return [log.node(r) for r in log if log.linkrev(r) in revset]
1942 1943
1943 1944 def lookup(revlog, x):
1944 1945 if revlog == cl:
1945 1946 c = cl.read(x)
1946 1947 changedfiles.update(c[3])
1947 1948 mfs.setdefault(c[0], x)
1948 1949 count[0] += 1
1949 1950 self.ui.progress(_('bundling'), count[0],
1950 1951 unit=_('changesets'), total=len(nodes))
1951 1952 return x
1952 1953 elif revlog == mf:
1953 1954 count[0] += 1
1954 1955 self.ui.progress(_('bundling'), count[0],
1955 1956 unit=_('manifests'), total=len(mfs))
1956 1957 return cl.node(revlog.linkrev(revlog.rev(x)))
1957 1958 else:
1958 1959 self.ui.progress(
1959 1960 _('bundling'), count[0], item=fstate[0],
1960 1961 total=len(changedfiles), unit=_('files'))
1961 1962 return cl.node(revlog.linkrev(revlog.rev(x)))
1962 1963
1963 1964 bundler = changegroup.bundle10(lookup)
1964 1965 reorder = self.ui.config('bundle', 'reorder', 'auto')
1965 1966 if reorder == 'auto':
1966 1967 reorder = None
1967 1968 else:
1968 1969 reorder = util.parsebool(reorder)
1969 1970
1970 1971 def gengroup():
1971 1972 '''yield a sequence of changegroup chunks (strings)'''
1972 1973 # construct a list of all changed files
1973 1974
1974 1975 for chunk in cl.group(nodes, bundler, reorder=reorder):
1975 1976 yield chunk
1976 1977 self.ui.progress(_('bundling'), None)
1977 1978
1978 1979 count[0] = 0
1979 1980 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1980 1981 yield chunk
1981 1982 self.ui.progress(_('bundling'), None)
1982 1983
1983 1984 count[0] = 0
1984 1985 for fname in sorted(changedfiles):
1985 1986 filerevlog = self.file(fname)
1986 1987 if not len(filerevlog):
1987 1988 raise util.Abort(_("empty or missing revlog for %s") % fname)
1988 1989 fstate[0] = fname
1989 1990 nodelist = gennodelst(filerevlog)
1990 1991 if nodelist:
1991 1992 count[0] += 1
1992 1993 yield bundler.fileheader(fname)
1993 1994 for chunk in filerevlog.group(nodelist, bundler, reorder):
1994 1995 yield chunk
1995 1996 yield bundler.close()
1996 1997 self.ui.progress(_('bundling'), None)
1997 1998
1998 1999 if nodes:
1999 2000 self.hook('outgoing', node=hex(nodes[0]), source=source)
2000 2001
2001 2002 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2002 2003
2003 2004 def addchangegroup(self, source, srctype, url, emptyok=False):
2004 2005 """Add the changegroup returned by source.read() to this repo.
2005 2006 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2006 2007 the URL of the repo where this changegroup is coming from.
2007 2008
2008 2009 Return an integer summarizing the change to this repo:
2009 2010 - nothing changed or no source: 0
2010 2011 - more heads than before: 1+added heads (2..n)
2011 2012 - fewer heads than before: -1-removed heads (-2..-n)
2012 2013 - number of heads stays the same: 1
2013 2014 """
2014 2015 def csmap(x):
2015 2016 self.ui.debug("add changeset %s\n" % short(x))
2016 2017 return len(cl)
2017 2018
2018 2019 def revmap(x):
2019 2020 return cl.rev(x)
2020 2021
2021 2022 if not source:
2022 2023 return 0
2023 2024
2024 2025 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2025 2026
2026 2027 changesets = files = revisions = 0
2027 2028 efiles = set()
2028 2029
2029 2030 # write changelog data to temp files so concurrent readers will not see
2030 2031 # an inconsistent view
2031 2032 cl = self.changelog
2032 2033 cl.delayupdate()
2033 2034 oldheads = cl.heads()
2034 2035
2035 2036 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2036 2037 try:
2037 2038 trp = weakref.proxy(tr)
2038 2039 # pull off the changeset group
2039 2040 self.ui.status(_("adding changesets\n"))
2040 2041 clstart = len(cl)
2041 2042 class prog(object):
2042 2043 step = _('changesets')
2043 2044 count = 1
2044 2045 ui = self.ui
2045 2046 total = None
2046 2047 def __call__(self):
2047 2048 self.ui.progress(self.step, self.count, unit=_('chunks'),
2048 2049 total=self.total)
2049 2050 self.count += 1
2050 2051 pr = prog()
2051 2052 source.callback = pr
2052 2053
2053 2054 source.changelogheader()
2054 2055 srccontent = cl.addgroup(source, csmap, trp)
2055 2056 if not (srccontent or emptyok):
2056 2057 raise util.Abort(_("received changelog group is empty"))
2057 2058 clend = len(cl)
2058 2059 changesets = clend - clstart
2059 2060 for c in xrange(clstart, clend):
2060 2061 efiles.update(self[c].files())
2061 2062 efiles = len(efiles)
2062 2063 self.ui.progress(_('changesets'), None)
2063 2064
2064 2065 # pull off the manifest group
2065 2066 self.ui.status(_("adding manifests\n"))
2066 2067 pr.step = _('manifests')
2067 2068 pr.count = 1
2068 2069 pr.total = changesets # manifests <= changesets
2069 2070 # no need to check for empty manifest group here:
2070 2071 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2071 2072 # no new manifest will be created and the manifest group will
2072 2073 # be empty during the pull
2073 2074 source.manifestheader()
2074 2075 self.manifest.addgroup(source, revmap, trp)
2075 2076 self.ui.progress(_('manifests'), None)
2076 2077
2077 2078 needfiles = {}
2078 2079 if self.ui.configbool('server', 'validate', default=False):
2079 2080 # validate incoming csets have their manifests
2080 2081 for cset in xrange(clstart, clend):
2081 2082 mfest = self.changelog.read(self.changelog.node(cset))[0]
2082 2083 mfest = self.manifest.readdelta(mfest)
2083 2084 # store file nodes we must see
2084 2085 for f, n in mfest.iteritems():
2085 2086 needfiles.setdefault(f, set()).add(n)
2086 2087
2087 2088 # process the files
2088 2089 self.ui.status(_("adding file changes\n"))
2089 2090 pr.step = _('files')
2090 2091 pr.count = 1
2091 2092 pr.total = efiles
2092 2093 source.callback = None
2093 2094
2094 2095 while True:
2095 2096 chunkdata = source.filelogheader()
2096 2097 if not chunkdata:
2097 2098 break
2098 2099 f = chunkdata["filename"]
2099 2100 self.ui.debug("adding %s revisions\n" % f)
2100 2101 pr()
2101 2102 fl = self.file(f)
2102 2103 o = len(fl)
2103 2104 if not fl.addgroup(source, revmap, trp):
2104 2105 raise util.Abort(_("received file revlog group is empty"))
2105 2106 revisions += len(fl) - o
2106 2107 files += 1
2107 2108 if f in needfiles:
2108 2109 needs = needfiles[f]
2109 2110 for new in xrange(o, len(fl)):
2110 2111 n = fl.node(new)
2111 2112 if n in needs:
2112 2113 needs.remove(n)
2113 2114 if not needs:
2114 2115 del needfiles[f]
2115 2116 self.ui.progress(_('files'), None)
2116 2117
2117 2118 for f, needs in needfiles.iteritems():
2118 2119 fl = self.file(f)
2119 2120 for n in needs:
2120 2121 try:
2121 2122 fl.rev(n)
2122 2123 except error.LookupError:
2123 2124 raise util.Abort(
2124 2125 _('missing file data for %s:%s - run hg verify') %
2125 2126 (f, hex(n)))
2126 2127
2127 2128 dh = 0
2128 2129 if oldheads:
2129 2130 heads = cl.heads()
2130 2131 dh = len(heads) - len(oldheads)
2131 2132 for h in heads:
2132 2133 if h not in oldheads and 'close' in self[h].extra():
2133 2134 dh -= 1
2134 2135 htext = ""
2135 2136 if dh:
2136 2137 htext = _(" (%+d heads)") % dh
2137 2138
2138 2139 self.ui.status(_("added %d changesets"
2139 2140 " with %d changes to %d files%s\n")
2140 2141 % (changesets, revisions, files, htext))
2141 2142
2142 2143 if changesets > 0:
2143 2144 p = lambda: cl.writepending() and self.root or ""
2144 2145 self.hook('pretxnchangegroup', throw=True,
2145 2146 node=hex(cl.node(clstart)), source=srctype,
2146 2147 url=url, pending=p)
2147 2148
2148 2149 added = [cl.node(r) for r in xrange(clstart, clend)]
2149 2150 publishing = self.ui.configbool('phases', 'publish', True)
2150 2151 if srctype == 'push':
2151 2152 # Old servers can not push the boundary themselves.
2152 2153 # New servers won't push the boundary if the changeset already
2153 2154 # existed locally as secret
2154 2155 #
2155 2156 # We should not use 'added' here but the list of all changes in
2156 2157 # the bundle
2157 2158 if publishing:
2158 2159 phases.advanceboundary(self, phases.public, srccontent)
2159 2160 else:
2160 2161 phases.advanceboundary(self, phases.draft, srccontent)
2161 2162 phases.retractboundary(self, phases.draft, added)
2162 2163 elif srctype != 'strip':
2163 2164 # publishing only alters behavior during push
2164 2165 #
2165 2166 # strip should not touch boundary at all
2166 2167 phases.retractboundary(self, phases.draft, added)
2167 2168
2168 2169 # make changelog see real files again
2169 2170 cl.finalize(trp)
2170 2171
2171 2172 tr.close()
2172 2173
2173 2174 if changesets > 0:
2174 2175 def runhooks():
2175 2176 # forcefully update the on-disk branch cache
2176 2177 self.ui.debug("updating the branch cache\n")
2177 2178 self.updatebranchcache()
2178 2179 self.hook("changegroup", node=hex(cl.node(clstart)),
2179 2180 source=srctype, url=url)
2180 2181
2181 2182 for n in added:
2182 2183 self.hook("incoming", node=hex(n), source=srctype,
2183 2184 url=url)
2184 2185 self._afterlock(runhooks)
2185 2186
2186 2187 finally:
2187 2188 tr.release()
2188 2189 # never return 0 here:
2189 2190 if dh < 0:
2190 2191 return dh - 1
2191 2192 else:
2192 2193 return dh + 1
2193 2194
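# Sketch: decoding addchangegroup()'s return value back into a head delta,
# per the docstring above (cg and repo as in the changegroupsubset sketch;
# the source URL is a placeholder).
ret = repo.addchangegroup(cg, 'pull', 'http://example.com/src')
if ret == 0:
    delta = 0        # nothing changed, or no source
elif ret > 0:
    delta = ret - 1  # e.g. 3 -> two heads added, 1 -> head count unchanged
else:
    delta = ret + 1  # e.g. -2 -> one head removed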
2194 2195 def stream_in(self, remote, requirements):
2195 2196 lock = self.lock()
2196 2197 try:
2197 2198 fp = remote.stream_out()
2198 2199 l = fp.readline()
2199 2200 try:
2200 2201 resp = int(l)
2201 2202 except ValueError:
2202 2203 raise error.ResponseError(
2203 2204 _('Unexpected response from remote server:'), l)
2204 2205 if resp == 1:
2205 2206 raise util.Abort(_('operation forbidden by server'))
2206 2207 elif resp == 2:
2207 2208 raise util.Abort(_('locking the remote repository failed'))
2208 2209 elif resp != 0:
2209 2210 raise util.Abort(_('the server sent an unknown error code'))
2210 2211 self.ui.status(_('streaming all changes\n'))
2211 2212 l = fp.readline()
2212 2213 try:
2213 2214 total_files, total_bytes = map(int, l.split(' ', 1))
2214 2215 except (ValueError, TypeError):
2215 2216 raise error.ResponseError(
2216 2217 _('Unexpected response from remote server:'), l)
2217 2218 self.ui.status(_('%d files to transfer, %s of data\n') %
2218 2219 (total_files, util.bytecount(total_bytes)))
2219 2220 start = time.time()
2220 2221 for i in xrange(total_files):
2221 2222 # XXX doesn't support '\n' or '\r' in filenames
2222 2223 l = fp.readline()
2223 2224 try:
2224 2225 name, size = l.split('\0', 1)
2225 2226 size = int(size)
2226 2227 except (ValueError, TypeError):
2227 2228 raise error.ResponseError(
2228 2229 _('Unexpected response from remote server:'), l)
2229 2230 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2230 2231 # for backwards compat, name was partially encoded
2231 2232 ofp = self.sopener(store.decodedir(name), 'w')
2232 2233 for chunk in util.filechunkiter(fp, limit=size):
2233 2234 ofp.write(chunk)
2234 2235 ofp.close()
2235 2236 elapsed = time.time() - start
2236 2237 if elapsed <= 0:
2237 2238 elapsed = 0.001
2238 2239 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2239 2240 (util.bytecount(total_bytes), elapsed,
2240 2241 util.bytecount(total_bytes / elapsed)))
2241 2242
2242 2243 # new requirements = old non-format requirements + new format-related
2243 2244 # requirements from the streamed-in repository
2244 2245 requirements.update(set(self.requirements) - self.supportedformats)
2245 2246 self._applyrequirements(requirements)
2246 2247 self._writerequirements()
2247 2248
2248 2249 self.invalidate()
2249 2250 return len(self.heads()) + 1
2250 2251 finally:
2251 2252 lock.release()
2252 2253
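# Sketch: a minimal reader for the stream_out wire format parsed above,
# assuming fp is the file-like response; stream_in() additionally decodes
# file names via store.decodedir and reads in bounded chunks.
def read_stream(fp):
    resp = int(fp.readline())  # 0 ok, 1 forbidden, 2 remote lock failed
    assert resp == 0
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for _i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)  # name NUL size NL
        yield name, fp.read(int(size))             # raw revlog bytes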
2253 2254 def clone(self, remote, heads=[], stream=False):
2254 2255 '''clone remote repository.
2255 2256
2256 2257 keyword arguments:
2257 2258 heads: list of revs to clone (forces use of pull)
2258 2259 stream: use streaming clone if possible'''
2259 2260
2260 2261 # now, all clients that can request uncompressed clones can
2261 2262 # read repo formats supported by all servers that can serve
2262 2263 # them.
2263 2264
2264 2265 # if revlog format changes, client will have to check version
2265 2266 # and format flags on "stream" capability, and use
2266 2267 # uncompressed only if compatible.
2267 2268
2268 2269 if stream and not heads:
2269 2270 # 'stream' means remote revlog format is revlogv1 only
2270 2271 if remote.capable('stream'):
2271 2272 return self.stream_in(remote, set(('revlogv1',)))
2272 2273 # otherwise, 'streamreqs' contains the remote revlog format
2273 2274 streamreqs = remote.capable('streamreqs')
2274 2275 if streamreqs:
2275 2276 streamreqs = set(streamreqs.split(','))
2276 2277 # if we support it, stream in and adjust our requirements
2277 2278 if not streamreqs - self.supportedformats:
2278 2279 return self.stream_in(remote, streamreqs)
2279 2280 return self.pull(remote, heads)
2280 2281
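# Worked example of the streamreqs test above, with a hypothetical
# capability string: streaming is used only when the set difference between
# the remote's formats and our supported formats is empty.
streamreqs = set('revlogv1,generaldelta'.split(','))
supportedformats = set(('revlogv1', 'generaldelta'))
assert not streamreqs - supportedformats  # empty difference -> can stream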
2281 2282 def pushkey(self, namespace, key, old, new):
2282 2283 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2283 2284 old=old, new=new)
2284 2285 ret = pushkey.push(self, namespace, key, old, new)
2285 2286 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2286 2287 ret=ret)
2287 2288 return ret
2288 2289
2289 2290 def listkeys(self, namespace):
2290 2291 self.hook('prelistkeys', throw=True, namespace=namespace)
2291 2292 values = pushkey.list(self, namespace)
2292 2293 self.hook('listkeys', namespace=namespace, values=values)
2293 2294 return values
2294 2295
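# Sketch: the 'bookmarks' pushkey namespace as exercised by push() above
# (repo as in the earlier sketches; the bookmark name is invented).
from mercurial.node import hex

marks = repo.listkeys('bookmarks')  # {bookmark name: hex node}
old = marks.get('feature-x', '')    # empty string means create
new = hex(repo.changelog.tip())
ok = repo.pushkey('bookmarks', 'feature-x', old, new)  # truthy on success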
2295 2296 def debugwireargs(self, one, two, three=None, four=None, five=None):
2296 2297 '''used to test argument passing over the wire'''
2297 2298 return "%s %s %s %s %s" % (one, two, three, four, five)
2298 2299
2299 2300 def savecommitmessage(self, text):
2300 2301 fp = self.opener('last-message.txt', 'wb')
2301 2302 try:
2302 2303 fp.write(text)
2303 2304 finally:
2304 2305 fp.close()
2305 2306 return self.pathto(fp.name[len(self.root)+1:])
2306 2307
2307 2308 # used to avoid circular references so destructors work
2308 2309 def aftertrans(files):
2309 2310 renamefiles = [tuple(t) for t in files]
2310 2311 def a():
2311 2312 for src, dest in renamefiles:
2312 2313 util.rename(src, dest)
2313 2314 return a
2314 2315
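# Sketch: aftertrans() in use; the returned callback captures only the
# rename list, never the repo, so no reference cycle keeps the repo's
# destructor from running (filenames are placeholders).
renames = [('.hg/store/journal', '.hg/store/undo')]
onclose = aftertrans(renames)
onclose()  # performs the renames via util.rename()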
2315 2316 def undoname(fn):
2316 2317 base, name = os.path.split(fn)
2317 2318 assert name.startswith('journal')
2318 2319 return os.path.join(base, name.replace('journal', 'undo', 1))
2319 2320
2320 2321 def instance(ui, path, create):
2321 2322 return localrepository(ui, util.urllocalpath(path), create)
2322 2323
2323 2324 def islocal(path):
2324 2325 return True