localrepo: refactor retrieving of journal/undo files paths...
Idan Kamara
r16236:97efd26e default
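This changeset moves the hard-coded tuple of journal file paths out of _writejournal() (which used to return it) into a dedicated _journalfiles() method, and adds a public undofiles() helper that maps each journal path through undoname(). transaction() now builds its rename list from _journalfiles() instead of the return value of _writejournal(). A minimal sketch of the resulting shape follows; note that the undoname() body below is an assumption (the real helper is defined elsewhere in localrepo.py, outside this hunk):

import os

def undoname(fn):
    # assumed behavior: map a journal path to its undo counterpart,
    # e.g. .hg/journal.dirstate -> .hg/undo.dirstate
    base, name = os.path.split(fn)
    return os.path.join(base, name.replace('journal', 'undo', 1))

class sketchrepo(object):
    def __init__(self, root):
        self.root = root

    def join(self, f):   # files directly under .hg/
        return os.path.join(self.root, '.hg', f)

    def sjoin(self, f):  # files under the store, .hg/store/
        return os.path.join(self.root, '.hg', 'store', f)

    def _journalfiles(self):
        # single source of truth for the journal paths; both
        # transaction() and undofiles() consume this list
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        # the undo files are simply the journal files after rename
        return [undoname(x) for x in self._journalfiles()]

With this split, the rename pairs used by the transaction machinery reduce to [(x, undoname(x)) for x in self._journalfiles()], as the hunk below shows.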
@@ -1,2344 +1,2348 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class storecache(filecache):
23 23 """filecache for files in the store"""
24 24 def join(self, obj, fname):
25 25 return obj.sjoin(fname)
26 26
27 27 class localrepository(repo.repository):
28 28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 29 'known', 'getbundle'))
30 30 supportedformats = set(('revlogv1', 'generaldelta'))
31 31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 32 'dotencode'))
33 33
34 34 def __init__(self, baseui, path=None, create=False):
35 35 repo.repository.__init__(self)
36 36 self.root = os.path.realpath(util.expandpath(path))
37 37 self.path = os.path.join(self.root, ".hg")
38 38 self.origroot = path
39 39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 40 self.opener = scmutil.opener(self.path)
41 41 self.wopener = scmutil.opener(self.root)
42 42 self.baseui = baseui
43 43 self.ui = baseui.copy()
44 44 self._dirtyphases = False
45 45 # A list of callbacks to shape the phase if no data were found.
46 46 # Callbacks are in the form: func(repo, roots) --> processed root.
47 47 # This list is to be filled by extensions during repo setup
48 48 self._phasedefaults = []
49 49
50 50 try:
51 51 self.ui.readconfig(self.join("hgrc"), self.root)
52 52 extensions.loadall(self.ui)
53 53 except IOError:
54 54 pass
55 55
56 56 if not os.path.isdir(self.path):
57 57 if create:
58 58 if not os.path.exists(path):
59 59 util.makedirs(path)
60 60 util.makedir(self.path, notindexed=True)
61 61 requirements = ["revlogv1"]
62 62 if self.ui.configbool('format', 'usestore', True):
63 63 os.mkdir(os.path.join(self.path, "store"))
64 64 requirements.append("store")
65 65 if self.ui.configbool('format', 'usefncache', True):
66 66 requirements.append("fncache")
67 67 if self.ui.configbool('format', 'dotencode', True):
68 68 requirements.append('dotencode')
69 69 # create an invalid changelog
70 70 self.opener.append(
71 71 "00changelog.i",
72 72 '\0\0\0\2' # represents revlogv2
73 73 ' dummy changelog to prevent using the old repo layout'
74 74 )
75 75 if self.ui.configbool('format', 'generaldelta', False):
76 76 requirements.append("generaldelta")
77 77 requirements = set(requirements)
78 78 else:
79 79 raise error.RepoError(_("repository %s not found") % path)
80 80 elif create:
81 81 raise error.RepoError(_("repository %s already exists") % path)
82 82 else:
83 83 try:
84 84 requirements = scmutil.readrequires(self.opener, self.supported)
85 85 except IOError, inst:
86 86 if inst.errno != errno.ENOENT:
87 87 raise
88 88 requirements = set()
89 89
90 90 self.sharedpath = self.path
91 91 try:
92 92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
93 93 if not os.path.exists(s):
94 94 raise error.RepoError(
95 95 _('.hg/sharedpath points to nonexistent directory %s') % s)
96 96 self.sharedpath = s
97 97 except IOError, inst:
98 98 if inst.errno != errno.ENOENT:
99 99 raise
100 100
101 101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
102 102 self.spath = self.store.path
103 103 self.sopener = self.store.opener
104 104 self.sjoin = self.store.join
105 105 self.opener.createmode = self.store.createmode
106 106 self._applyrequirements(requirements)
107 107 if create:
108 108 self._writerequirements()
109 109
110 110
111 111 self._branchcache = None
112 112 self._branchcachetip = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 # A cache for various files under .hg/ that tracks file changes
118 118 # (used by the filecache decorator)
119 119 #
120 120 # Maps a property name to its util.filecacheentry
121 121 self._filecache = {}
122 122
123 123 def _applyrequirements(self, requirements):
124 124 self.requirements = requirements
125 125 openerreqs = set(('revlogv1', 'generaldelta'))
126 126 self.sopener.options = dict((r, 1) for r in requirements
127 127 if r in openerreqs)
128 128
129 129 def _writerequirements(self):
130 130 reqfile = self.opener("requires", "w")
131 131 for r in self.requirements:
132 132 reqfile.write("%s\n" % r)
133 133 reqfile.close()
134 134
135 135 def _checknested(self, path):
136 136 """Determine if path is a legal nested repository."""
137 137 if not path.startswith(self.root):
138 138 return False
139 139 subpath = path[len(self.root) + 1:]
140 140 normsubpath = util.pconvert(subpath)
141 141
142 142 # XXX: Checking against the current working copy is wrong in
143 143 # the sense that it can reject things like
144 144 #
145 145 # $ hg cat -r 10 sub/x.txt
146 146 #
147 147 # if sub/ is no longer a subrepository in the working copy
148 148 # parent revision.
149 149 #
150 150 # However, it can of course also allow things that would have
151 151 # been rejected before, such as the above cat command if sub/
152 152 # is a subrepository now, but was a normal directory before.
153 153 # The old path auditor would have rejected it by mistake since it
154 154 # panics when it sees sub/.hg/.
155 155 #
156 156 # All in all, checking against the working copy seems sensible
157 157 # since we want to prevent access to nested repositories on
158 158 # the filesystem *now*.
159 159 ctx = self[None]
160 160 parts = util.splitpath(subpath)
161 161 while parts:
162 162 prefix = '/'.join(parts)
163 163 if prefix in ctx.substate:
164 164 if prefix == normsubpath:
165 165 return True
166 166 else:
167 167 sub = ctx.sub(prefix)
168 168 return sub.checknested(subpath[len(prefix) + 1:])
169 169 else:
170 170 parts.pop()
171 171 return False
172 172
173 173 @filecache('bookmarks')
174 174 def _bookmarks(self):
175 175 return bookmarks.read(self)
176 176
177 177 @filecache('bookmarks.current')
178 178 def _bookmarkcurrent(self):
179 179 return bookmarks.readcurrent(self)
180 180
181 181 def _writebookmarks(self, marks):
182 182 bookmarks.write(self)
183 183
184 184 @storecache('phaseroots')
185 185 def _phaseroots(self):
186 186 self._dirtyphases = False
187 187 phaseroots = phases.readroots(self)
188 188 phases.filterunknown(self, phaseroots)
189 189 return phaseroots
190 190
191 191 @propertycache
192 192 def _phaserev(self):
193 193 cache = [phases.public] * len(self)
194 194 for phase in phases.trackedphases:
195 195 roots = map(self.changelog.rev, self._phaseroots[phase])
196 196 if roots:
197 197 for rev in roots:
198 198 cache[rev] = phase
199 199 for rev in self.changelog.descendants(*roots):
200 200 cache[rev] = phase
201 201 return cache
202 202
203 203 @storecache('00changelog.i')
204 204 def changelog(self):
205 205 c = changelog.changelog(self.sopener)
206 206 if 'HG_PENDING' in os.environ:
207 207 p = os.environ['HG_PENDING']
208 208 if p.startswith(self.root):
209 209 c.readpending('00changelog.i.a')
210 210 return c
211 211
212 212 @storecache('00manifest.i')
213 213 def manifest(self):
214 214 return manifest.manifest(self.sopener)
215 215
216 216 @filecache('dirstate')
217 217 def dirstate(self):
218 218 warned = [0]
219 219 def validate(node):
220 220 try:
221 221 self.changelog.rev(node)
222 222 return node
223 223 except error.LookupError:
224 224 if not warned[0]:
225 225 warned[0] = True
226 226 self.ui.warn(_("warning: ignoring unknown"
227 227 " working parent %s!\n") % short(node))
228 228 return nullid
229 229
230 230 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231 231
232 232 def __getitem__(self, changeid):
233 233 if changeid is None:
234 234 return context.workingctx(self)
235 235 return context.changectx(self, changeid)
236 236
237 237 def __contains__(self, changeid):
238 238 try:
239 239 return bool(self.lookup(changeid))
240 240 except error.RepoLookupError:
241 241 return False
242 242
243 243 def __nonzero__(self):
244 244 return True
245 245
246 246 def __len__(self):
247 247 return len(self.changelog)
248 248
249 249 def __iter__(self):
250 250 for i in xrange(len(self)):
251 251 yield i
252 252
253 253 def revs(self, expr, *args):
254 254 '''Return a list of revisions matching the given revset'''
255 255 expr = revset.formatspec(expr, *args)
256 256 m = revset.match(None, expr)
257 257 return [r for r in m(self, range(len(self)))]
258 258
259 259 def set(self, expr, *args):
260 260 '''
261 261 Yield a context for each matching revision, after doing arg
262 262 replacement via revset.formatspec
263 263 '''
264 264 for r in self.revs(expr, *args):
265 265 yield self[r]
266 266
267 267 def url(self):
268 268 return 'file:' + self.root
269 269
270 270 def hook(self, name, throw=False, **args):
271 271 return hook.hook(self.ui, self, name, throw, **args)
272 272
273 273 tag_disallowed = ':\r\n'
274 274
275 275 def _tag(self, names, node, message, local, user, date, extra={}):
276 276 if isinstance(names, str):
277 277 allchars = names
278 278 names = (names,)
279 279 else:
280 280 allchars = ''.join(names)
281 281 for c in self.tag_disallowed:
282 282 if c in allchars:
283 283 raise util.Abort(_('%r cannot be used in a tag name') % c)
284 284
285 285 branches = self.branchmap()
286 286 for name in names:
287 287 self.hook('pretag', throw=True, node=hex(node), tag=name,
288 288 local=local)
289 289 if name in branches:
290 290 self.ui.warn(_("warning: tag %s conflicts with existing"
291 291 " branch name\n") % name)
292 292
293 293 def writetags(fp, names, munge, prevtags):
294 294 fp.seek(0, 2)
295 295 if prevtags and prevtags[-1] != '\n':
296 296 fp.write('\n')
297 297 for name in names:
298 298 m = munge and munge(name) or name
299 299 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
300 300 old = self.tags().get(name, nullid)
301 301 fp.write('%s %s\n' % (hex(old), m))
302 302 fp.write('%s %s\n' % (hex(node), m))
303 303 fp.close()
304 304
305 305 prevtags = ''
306 306 if local:
307 307 try:
308 308 fp = self.opener('localtags', 'r+')
309 309 except IOError:
310 310 fp = self.opener('localtags', 'a')
311 311 else:
312 312 prevtags = fp.read()
313 313
314 314 # local tags are stored in the current charset
315 315 writetags(fp, names, None, prevtags)
316 316 for name in names:
317 317 self.hook('tag', node=hex(node), tag=name, local=local)
318 318 return
319 319
320 320 try:
321 321 fp = self.wfile('.hgtags', 'rb+')
322 322 except IOError, e:
323 323 if e.errno != errno.ENOENT:
324 324 raise
325 325 fp = self.wfile('.hgtags', 'ab')
326 326 else:
327 327 prevtags = fp.read()
328 328
329 329 # committed tags are stored in UTF-8
330 330 writetags(fp, names, encoding.fromlocal, prevtags)
331 331
332 332 fp.close()
333 333
334 334 self.invalidatecaches()
335 335
336 336 if '.hgtags' not in self.dirstate:
337 337 self[None].add(['.hgtags'])
338 338
339 339 m = matchmod.exact(self.root, '', ['.hgtags'])
340 340 tagnode = self.commit(message, user, date, extra=extra, match=m)
341 341
342 342 for name in names:
343 343 self.hook('tag', node=hex(node), tag=name, local=local)
344 344
345 345 return tagnode
346 346
347 347 def tag(self, names, node, message, local, user, date):
348 348 '''tag a revision with one or more symbolic names.
349 349
350 350 names is a list of strings or, when adding a single tag, names may be a
351 351 string.
352 352
353 353 if local is True, the tags are stored in a per-repository file.
354 354 otherwise, they are stored in the .hgtags file, and a new
355 355 changeset is committed with the change.
356 356
357 357 keyword arguments:
358 358
359 359 local: whether to store tags in non-version-controlled file
360 360 (default False)
361 361
362 362 message: commit message to use if committing
363 363
364 364 user: name of user to use if committing
365 365
366 366 date: date tuple to use if committing'''
367 367
368 368 if not local:
369 369 for x in self.status()[:5]:
370 370 if '.hgtags' in x:
371 371 raise util.Abort(_('working copy of .hgtags is changed '
372 372 '(please commit .hgtags manually)'))
373 373
374 374 self.tags() # instantiate the cache
375 375 self._tag(names, node, message, local, user, date)
376 376
377 377 @propertycache
378 378 def _tagscache(self):
379 379 '''Returns a tagscache object that contains various tag-related caches.'''
380 380
381 381 # This simplifies its cache management by having one decorated
382 382 # function (this one) and the rest simply fetch things from it.
383 383 class tagscache(object):
384 384 def __init__(self):
385 385 # These two define the set of tags for this repository. tags
386 386 # maps tag name to node; tagtypes maps tag name to 'global' or
387 387 # 'local'. (Global tags are defined by .hgtags across all
388 388 # heads, and local tags are defined in .hg/localtags.)
389 389 # They constitute the in-memory cache of tags.
390 390 self.tags = self.tagtypes = None
391 391
392 392 self.nodetagscache = self.tagslist = None
393 393
394 394 cache = tagscache()
395 395 cache.tags, cache.tagtypes = self._findtags()
396 396
397 397 return cache
398 398
399 399 def tags(self):
400 400 '''return a mapping of tag to node'''
401 401 return self._tagscache.tags
402 402
403 403 def _findtags(self):
404 404 '''Do the hard work of finding tags. Return a pair of dicts
405 405 (tags, tagtypes) where tags maps tag name to node, and tagtypes
406 406 maps tag name to a string like \'global\' or \'local\'.
407 407 Subclasses or extensions are free to add their own tags, but
408 408 should be aware that the returned dicts will be retained for the
409 409 duration of the localrepo object.'''
410 410
411 411 # XXX what tagtype should subclasses/extensions use? Currently
412 412 # mq and bookmarks add tags, but do not set the tagtype at all.
413 413 # Should each extension invent its own tag type? Should there
414 414 # be one tagtype for all such "virtual" tags? Or is the status
415 415 # quo fine?
416 416
417 417 alltags = {} # map tag name to (node, hist)
418 418 tagtypes = {}
419 419
420 420 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
421 421 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
422 422
423 423 # Build the return dicts. Have to re-encode tag names because
424 424 # the tags module always uses UTF-8 (in order not to lose info
425 425 # writing to the cache), but the rest of Mercurial wants them in
426 426 # local encoding.
427 427 tags = {}
428 428 for (name, (node, hist)) in alltags.iteritems():
429 429 if node != nullid:
430 430 try:
431 431 # ignore tags to unknown nodes
432 432 self.changelog.lookup(node)
433 433 tags[encoding.tolocal(name)] = node
434 434 except error.LookupError:
435 435 pass
436 436 tags['tip'] = self.changelog.tip()
437 437 tagtypes = dict([(encoding.tolocal(name), value)
438 438 for (name, value) in tagtypes.iteritems()])
439 439 return (tags, tagtypes)
440 440
441 441 def tagtype(self, tagname):
442 442 '''
443 443 return the type of the given tag. result can be:
444 444
445 445 'local' : a local tag
446 446 'global' : a global tag
447 447 None : tag does not exist
448 448 '''
449 449
450 450 return self._tagscache.tagtypes.get(tagname)
451 451
452 452 def tagslist(self):
453 453 '''return a list of tags ordered by revision'''
454 454 if not self._tagscache.tagslist:
455 455 l = []
456 456 for t, n in self.tags().iteritems():
457 457 r = self.changelog.rev(n)
458 458 l.append((r, t, n))
459 459 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
460 460
461 461 return self._tagscache.tagslist
462 462
463 463 def nodetags(self, node):
464 464 '''return the tags associated with a node'''
465 465 if not self._tagscache.nodetagscache:
466 466 nodetagscache = {}
467 467 for t, n in self.tags().iteritems():
468 468 nodetagscache.setdefault(n, []).append(t)
469 469 for tags in nodetagscache.itervalues():
470 470 tags.sort()
471 471 self._tagscache.nodetagscache = nodetagscache
472 472 return self._tagscache.nodetagscache.get(node, [])
473 473
474 474 def nodebookmarks(self, node):
475 475 marks = []
476 476 for bookmark, n in self._bookmarks.iteritems():
477 477 if n == node:
478 478 marks.append(bookmark)
479 479 return sorted(marks)
480 480
481 481 def _branchtags(self, partial, lrev):
482 482 # TODO: rename this function?
483 483 tiprev = len(self) - 1
484 484 if lrev != tiprev:
485 485 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
486 486 self._updatebranchcache(partial, ctxgen)
487 487 self._writebranchcache(partial, self.changelog.tip(), tiprev)
488 488
489 489 return partial
490 490
491 491 def updatebranchcache(self):
492 492 tip = self.changelog.tip()
493 493 if self._branchcache is not None and self._branchcachetip == tip:
494 494 return
495 495
496 496 oldtip = self._branchcachetip
497 497 self._branchcachetip = tip
498 498 if oldtip is None or oldtip not in self.changelog.nodemap:
499 499 partial, last, lrev = self._readbranchcache()
500 500 else:
501 501 lrev = self.changelog.rev(oldtip)
502 502 partial = self._branchcache
503 503
504 504 self._branchtags(partial, lrev)
505 505 # this private cache holds all heads (not just tips)
506 506 self._branchcache = partial
507 507
508 508 def branchmap(self):
509 509 '''returns a dictionary {branch: [branchheads]}'''
510 510 self.updatebranchcache()
511 511 return self._branchcache
512 512
513 513 def branchtags(self):
514 514 '''return a dict where branch names map to the tipmost head of
515 515 the branch; open heads come before closed'''
516 516 bt = {}
517 517 for bn, heads in self.branchmap().iteritems():
518 518 tip = heads[-1]
519 519 for h in reversed(heads):
520 520 if 'close' not in self.changelog.read(h)[5]:
521 521 tip = h
522 522 break
523 523 bt[bn] = tip
524 524 return bt
525 525
526 526 def _readbranchcache(self):
527 527 partial = {}
528 528 try:
529 529 f = self.opener("cache/branchheads")
530 530 lines = f.read().split('\n')
531 531 f.close()
532 532 except (IOError, OSError):
533 533 return {}, nullid, nullrev
534 534
535 535 try:
536 536 last, lrev = lines.pop(0).split(" ", 1)
537 537 last, lrev = bin(last), int(lrev)
538 538 if lrev >= len(self) or self[lrev].node() != last:
539 539 # invalidate the cache
540 540 raise ValueError('invalidating branch cache (tip differs)')
541 541 for l in lines:
542 542 if not l:
543 543 continue
544 544 node, label = l.split(" ", 1)
545 545 label = encoding.tolocal(label.strip())
546 546 partial.setdefault(label, []).append(bin(node))
547 547 except KeyboardInterrupt:
548 548 raise
549 549 except Exception, inst:
550 550 if self.ui.debugflag:
551 551 self.ui.warn(str(inst), '\n')
552 552 partial, last, lrev = {}, nullid, nullrev
553 553 return partial, last, lrev
554 554
555 555 def _writebranchcache(self, branches, tip, tiprev):
556 556 try:
557 557 f = self.opener("cache/branchheads", "w", atomictemp=True)
558 558 f.write("%s %s\n" % (hex(tip), tiprev))
559 559 for label, nodes in branches.iteritems():
560 560 for node in nodes:
561 561 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
562 562 f.close()
563 563 except (IOError, OSError):
564 564 pass
565 565
566 566 def _updatebranchcache(self, partial, ctxgen):
567 567 # collect new branch entries
568 568 newbranches = {}
569 569 for c in ctxgen:
570 570 newbranches.setdefault(c.branch(), []).append(c.node())
571 571 # if older branchheads are reachable from new ones, they aren't
572 572 # really branchheads. Note checking parents is insufficient:
573 573 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
574 574 for branch, newnodes in newbranches.iteritems():
575 575 bheads = partial.setdefault(branch, [])
576 576 bheads.extend(newnodes)
577 577 if len(bheads) <= 1:
578 578 continue
579 579 bheads = sorted(bheads, key=lambda x: self[x].rev())
580 580 # starting from tip means fewer passes over reachable
581 581 while newnodes:
582 582 latest = newnodes.pop()
583 583 if latest not in bheads:
584 584 continue
585 585 minbhrev = self[bheads[0]].node()
586 586 reachable = self.changelog.reachable(latest, minbhrev)
587 587 reachable.remove(latest)
588 588 if reachable:
589 589 bheads = [b for b in bheads if b not in reachable]
590 590 partial[branch] = bheads
591 591
592 592 def lookup(self, key):
593 593 if isinstance(key, int):
594 594 return self.changelog.node(key)
595 595 elif key == '.':
596 596 return self.dirstate.p1()
597 597 elif key == 'null':
598 598 return nullid
599 599 elif key == 'tip':
600 600 return self.changelog.tip()
601 601 n = self.changelog._match(key)
602 602 if n:
603 603 return n
604 604 if key in self._bookmarks:
605 605 return self._bookmarks[key]
606 606 if key in self.tags():
607 607 return self.tags()[key]
608 608 if key in self.branchtags():
609 609 return self.branchtags()[key]
610 610 n = self.changelog._partialmatch(key)
611 611 if n:
612 612 return n
613 613
614 614 # can't find key, check if it might have come from damaged dirstate
615 615 if key in self.dirstate.parents():
616 616 raise error.Abort(_("working directory has unknown parent '%s'!")
617 617 % short(key))
618 618 try:
619 619 if len(key) == 20:
620 620 key = hex(key)
621 621 except TypeError:
622 622 pass
623 623 raise error.RepoLookupError(_("unknown revision '%s'") % key)
624 624
625 625 def lookupbranch(self, key, remote=None):
626 626 repo = remote or self
627 627 if key in repo.branchmap():
628 628 return key
629 629
630 630 repo = (remote and remote.local()) and remote or self
631 631 return repo[key].branch()
632 632
633 633 def known(self, nodes):
634 634 nm = self.changelog.nodemap
635 635 result = []
636 636 for n in nodes:
637 637 r = nm.get(n)
638 638 resp = not (r is None or self._phaserev[r] >= phases.secret)
639 639 result.append(resp)
640 640 return result
641 641
642 642 def local(self):
643 643 return self
644 644
645 645 def join(self, f):
646 646 return os.path.join(self.path, f)
647 647
648 648 def wjoin(self, f):
649 649 return os.path.join(self.root, f)
650 650
651 651 def file(self, f):
652 652 if f[0] == '/':
653 653 f = f[1:]
654 654 return filelog.filelog(self.sopener, f)
655 655
656 656 def changectx(self, changeid):
657 657 return self[changeid]
658 658
659 659 def parents(self, changeid=None):
660 660 '''get list of changectxs for parents of changeid'''
661 661 return self[changeid].parents()
662 662
663 663 def filectx(self, path, changeid=None, fileid=None):
664 664 """changeid can be a changeset revision, node, or tag.
665 665 fileid can be a file revision or node."""
666 666 return context.filectx(self, path, changeid, fileid)
667 667
668 668 def getcwd(self):
669 669 return self.dirstate.getcwd()
670 670
671 671 def pathto(self, f, cwd=None):
672 672 return self.dirstate.pathto(f, cwd)
673 673
674 674 def wfile(self, f, mode='r'):
675 675 return self.wopener(f, mode)
676 676
677 677 def _link(self, f):
678 678 return os.path.islink(self.wjoin(f))
679 679
680 680 def _loadfilter(self, filter):
681 681 if filter not in self.filterpats:
682 682 l = []
683 683 for pat, cmd in self.ui.configitems(filter):
684 684 if cmd == '!':
685 685 continue
686 686 mf = matchmod.match(self.root, '', [pat])
687 687 fn = None
688 688 params = cmd
689 689 for name, filterfn in self._datafilters.iteritems():
690 690 if cmd.startswith(name):
691 691 fn = filterfn
692 692 params = cmd[len(name):].lstrip()
693 693 break
694 694 if not fn:
695 695 fn = lambda s, c, **kwargs: util.filter(s, c)
696 696 # Wrap old filters not supporting keyword arguments
697 697 if not inspect.getargspec(fn)[2]:
698 698 oldfn = fn
699 699 fn = lambda s, c, **kwargs: oldfn(s, c)
700 700 l.append((mf, fn, params))
701 701 self.filterpats[filter] = l
702 702 return self.filterpats[filter]
703 703
704 704 def _filter(self, filterpats, filename, data):
705 705 for mf, fn, cmd in filterpats:
706 706 if mf(filename):
707 707 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
708 708 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
709 709 break
710 710
711 711 return data
712 712
713 713 @propertycache
714 714 def _encodefilterpats(self):
715 715 return self._loadfilter('encode')
716 716
717 717 @propertycache
718 718 def _decodefilterpats(self):
719 719 return self._loadfilter('decode')
720 720
721 721 def adddatafilter(self, name, filter):
722 722 self._datafilters[name] = filter
723 723
724 724 def wread(self, filename):
725 725 if self._link(filename):
726 726 data = os.readlink(self.wjoin(filename))
727 727 else:
728 728 data = self.wopener.read(filename)
729 729 return self._filter(self._encodefilterpats, filename, data)
730 730
731 731 def wwrite(self, filename, data, flags):
732 732 data = self._filter(self._decodefilterpats, filename, data)
733 733 if 'l' in flags:
734 734 self.wopener.symlink(data, filename)
735 735 else:
736 736 self.wopener.write(filename, data)
737 737 if 'x' in flags:
738 738 util.setflags(self.wjoin(filename), False, True)
739 739
740 740 def wwritedata(self, filename, data):
741 741 return self._filter(self._decodefilterpats, filename, data)
742 742
743 743 def transaction(self, desc):
744 744 tr = self._transref and self._transref() or None
745 745 if tr and tr.running():
746 746 return tr.nest()
747 747
748 748 # abort here if the journal already exists
749 749 if os.path.exists(self.sjoin("journal")):
750 750 raise error.RepoError(
751 751 _("abandoned transaction found - run hg recover"))
752 752
753 journalfiles = self._writejournal(desc)
754 renames = [(x, undoname(x)) for x in journalfiles]
753 self._writejournal(desc)
754 renames = [(x, undoname(x)) for x in self._journalfiles()]
755 755
756 756 tr = transaction.transaction(self.ui.warn, self.sopener,
757 757 self.sjoin("journal"),
758 758 aftertrans(renames),
759 759 self.store.createmode)
760 760 self._transref = weakref.ref(tr)
761 761 return tr
762 762
763 def _journalfiles(self):
764 return (self.sjoin('journal'), self.join('journal.dirstate'),
765 self.join('journal.branch'), self.join('journal.desc'),
766 self.join('journal.bookmarks'),
767 self.sjoin('journal.phaseroots'))
768
769 def undofiles(self):
770 return [undoname(x) for x in self._journalfiles()]
771
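# A hypothetical consumer of the new public undofiles() API (a sketch,
# not taken from this diff): cleanup code can now locate the undo files
# without duplicating the path list, e.g.
#
#     for f in repo.undofiles():
#         if os.path.exists(f):
#             os.unlink(f)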
763 772 def _writejournal(self, desc):
764 773 # save dirstate for rollback
765 774 try:
766 775 ds = self.opener.read("dirstate")
767 776 except IOError:
768 777 ds = ""
769 778 self.opener.write("journal.dirstate", ds)
770 779 self.opener.write("journal.branch",
771 780 encoding.fromlocal(self.dirstate.branch()))
772 781 self.opener.write("journal.desc",
773 782 "%d\n%s\n" % (len(self), desc))
774 783
775 784 bkname = self.join('bookmarks')
776 785 if os.path.exists(bkname):
777 786 util.copyfile(bkname, self.join('journal.bookmarks'))
778 787 else:
779 788 self.opener.write('journal.bookmarks', '')
780 789 phasesname = self.sjoin('phaseroots')
781 790 if os.path.exists(phasesname):
782 791 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
783 792 else:
784 793 self.sopener.write('journal.phaseroots', '')
785 794
786 return (self.sjoin('journal'), self.join('journal.dirstate'),
787 self.join('journal.branch'), self.join('journal.desc'),
788 self.join('journal.bookmarks'),
789 self.sjoin('journal.phaseroots'))
790
791 795 def recover(self):
792 796 lock = self.lock()
793 797 try:
794 798 if os.path.exists(self.sjoin("journal")):
795 799 self.ui.status(_("rolling back interrupted transaction\n"))
796 800 transaction.rollback(self.sopener, self.sjoin("journal"),
797 801 self.ui.warn)
798 802 self.invalidate()
799 803 return True
800 804 else:
801 805 self.ui.warn(_("no interrupted transaction available\n"))
802 806 return False
803 807 finally:
804 808 lock.release()
805 809
806 810 def rollback(self, dryrun=False, force=False):
807 811 wlock = lock = None
808 812 try:
809 813 wlock = self.wlock()
810 814 lock = self.lock()
811 815 if os.path.exists(self.sjoin("undo")):
812 816 return self._rollback(dryrun, force)
813 817 else:
814 818 self.ui.warn(_("no rollback information available\n"))
815 819 return 1
816 820 finally:
817 821 release(lock, wlock)
818 822
819 823 def _rollback(self, dryrun, force):
820 824 ui = self.ui
821 825 try:
822 826 args = self.opener.read('undo.desc').splitlines()
823 827 (oldlen, desc, detail) = (int(args[0]), args[1], None)
824 828 if len(args) >= 3:
825 829 detail = args[2]
826 830 oldtip = oldlen - 1
827 831
828 832 if detail and ui.verbose:
829 833 msg = (_('repository tip rolled back to revision %s'
830 834 ' (undo %s: %s)\n')
831 835 % (oldtip, desc, detail))
832 836 else:
833 837 msg = (_('repository tip rolled back to revision %s'
834 838 ' (undo %s)\n')
835 839 % (oldtip, desc))
836 840 except IOError:
837 841 msg = _('rolling back unknown transaction\n')
838 842 desc = None
839 843
840 844 if not force and self['.'] != self['tip'] and desc == 'commit':
841 845 raise util.Abort(
842 846 _('rollback of last commit while not checked out '
843 847 'may lose data'), hint=_('use -f to force'))
844 848
845 849 ui.status(msg)
846 850 if dryrun:
847 851 return 0
848 852
849 853 parents = self.dirstate.parents()
850 854 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
851 855 if os.path.exists(self.join('undo.bookmarks')):
852 856 util.rename(self.join('undo.bookmarks'),
853 857 self.join('bookmarks'))
854 858 if os.path.exists(self.sjoin('undo.phaseroots')):
855 859 util.rename(self.sjoin('undo.phaseroots'),
856 860 self.sjoin('phaseroots'))
857 861 self.invalidate()
858 862
859 863 parentgone = (parents[0] not in self.changelog.nodemap or
860 864 parents[1] not in self.changelog.nodemap)
861 865 if parentgone:
862 866 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
863 867 try:
864 868 branch = self.opener.read('undo.branch')
865 869 self.dirstate.setbranch(branch)
866 870 except IOError:
867 871 ui.warn(_('named branch could not be reset: '
868 872 'current branch is still \'%s\'\n')
869 873 % self.dirstate.branch())
870 874
871 875 self.dirstate.invalidate()
872 876 parents = tuple([p.rev() for p in self.parents()])
873 877 if len(parents) > 1:
874 878 ui.status(_('working directory now based on '
875 879 'revisions %d and %d\n') % parents)
876 880 else:
877 881 ui.status(_('working directory now based on '
878 882 'revision %d\n') % parents)
879 883 self.destroyed()
880 884 return 0
881 885
882 886 def invalidatecaches(self):
883 887 def delcache(name):
884 888 try:
885 889 delattr(self, name)
886 890 except AttributeError:
887 891 pass
888 892
889 893 delcache('_tagscache')
890 894 delcache('_phaserev')
891 895
892 896 self._branchcache = None # in UTF-8
893 897 self._branchcachetip = None
894 898
895 899 def invalidatedirstate(self):
896 900 '''Invalidates the dirstate, causing the next call to dirstate
897 901 to check whether it was modified since the last time it was read,
898 902 rereading it if it was.
899 903
900 904 This is different from dirstate.invalidate() in that it doesn't
901 905 always reread the dirstate. Use dirstate.invalidate() if you want to
902 906 explicitly read the dirstate again (i.e. restoring it to a previous
903 907 known good state).'''
904 908 if 'dirstate' in self.__dict__:
905 909 for k in self.dirstate._filecache:
906 910 try:
907 911 delattr(self.dirstate, k)
908 912 except AttributeError:
909 913 pass
910 914 delattr(self, 'dirstate')
911 915
912 916 def invalidate(self):
913 917 for k in self._filecache:
914 918 # dirstate is invalidated separately in invalidatedirstate()
915 919 if k == 'dirstate':
916 920 continue
917 921
918 922 try:
919 923 delattr(self, k)
920 924 except AttributeError:
921 925 pass
922 926 self.invalidatecaches()
923 927
924 928 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
925 929 try:
926 930 l = lock.lock(lockname, 0, releasefn, desc=desc)
927 931 except error.LockHeld, inst:
928 932 if not wait:
929 933 raise
930 934 self.ui.warn(_("waiting for lock on %s held by %r\n") %
931 935 (desc, inst.locker))
932 936 # default to 600 seconds timeout
933 937 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
934 938 releasefn, desc=desc)
935 939 if acquirefn:
936 940 acquirefn()
937 941 return l
938 942
939 943 def _afterlock(self, callback):
940 944 """add a callback to the current repository lock.
941 945
942 946 The callback will be executed on lock release."""
943 947 l = self._lockref and self._lockref()
944 948 if l:
945 949 l.postrelease.append(callback)
946 950
947 951 def lock(self, wait=True):
948 952 '''Lock the repository store (.hg/store) and return a weak reference
949 953 to the lock. Use this before modifying the store (e.g. committing or
950 954 stripping). If you are opening a transaction, get a lock as well.'''
951 955 l = self._lockref and self._lockref()
952 956 if l is not None and l.held:
953 957 l.lock()
954 958 return l
955 959
956 960 def unlock():
957 961 self.store.write()
958 962 if self._dirtyphases:
959 963 phases.writeroots(self)
960 964 self._dirtyphases = False
961 965 for k, ce in self._filecache.items():
962 966 if k == 'dirstate':
963 967 continue
964 968 ce.refresh()
965 969
966 970 l = self._lock(self.sjoin("lock"), wait, unlock,
967 971 self.invalidate, _('repository %s') % self.origroot)
968 972 self._lockref = weakref.ref(l)
969 973 return l
970 974
971 975 def wlock(self, wait=True):
972 976 '''Lock the non-store parts of the repository (everything under
973 977 .hg except .hg/store) and return a weak reference to the lock.
974 978 Use this before modifying files in .hg.'''
975 979 l = self._wlockref and self._wlockref()
976 980 if l is not None and l.held:
977 981 l.lock()
978 982 return l
979 983
980 984 def unlock():
981 985 self.dirstate.write()
982 986 ce = self._filecache.get('dirstate')
983 987 if ce:
984 988 ce.refresh()
985 989
986 990 l = self._lock(self.join("wlock"), wait, unlock,
987 991 self.invalidatedirstate, _('working directory of %s') %
988 992 self.origroot)
989 993 self._wlockref = weakref.ref(l)
990 994 return l
991 995
992 996 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
993 997 """
994 998 commit an individual file as part of a larger transaction
995 999 """
996 1000
997 1001 fname = fctx.path()
998 1002 text = fctx.data()
999 1003 flog = self.file(fname)
1000 1004 fparent1 = manifest1.get(fname, nullid)
1001 1005 fparent2 = fparent2o = manifest2.get(fname, nullid)
1002 1006
1003 1007 meta = {}
1004 1008 copy = fctx.renamed()
1005 1009 if copy and copy[0] != fname:
1006 1010 # Mark the new revision of this file as a copy of another
1007 1011 # file. This copy data will effectively act as a parent
1008 1012 # of this new revision. If this is a merge, the first
1009 1013 # parent will be the nullid (meaning "look up the copy data")
1010 1014 # and the second one will be the other parent. For example:
1011 1015 #
1012 1016 # 0 --- 1 --- 3 rev1 changes file foo
1013 1017 # \ / rev2 renames foo to bar and changes it
1014 1018 # \- 2 -/ rev3 should have bar with all changes and
1015 1019 # should record that bar descends from
1016 1020 # bar in rev2 and foo in rev1
1017 1021 #
1018 1022 # this allows this merge to succeed:
1019 1023 #
1020 1024 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1021 1025 # \ / merging rev3 and rev4 should use bar@rev2
1022 1026 # \- 2 --- 4 as the merge base
1023 1027 #
1024 1028
1025 1029 cfname = copy[0]
1026 1030 crev = manifest1.get(cfname)
1027 1031 newfparent = fparent2
1028 1032
1029 1033 if manifest2: # branch merge
1030 1034 if fparent2 == nullid or crev is None: # copied on remote side
1031 1035 if cfname in manifest2:
1032 1036 crev = manifest2[cfname]
1033 1037 newfparent = fparent1
1034 1038
1035 1039 # find source in nearest ancestor if we've lost track
1036 1040 if not crev:
1037 1041 self.ui.debug(" %s: searching for copy revision for %s\n" %
1038 1042 (fname, cfname))
1039 1043 for ancestor in self[None].ancestors():
1040 1044 if cfname in ancestor:
1041 1045 crev = ancestor[cfname].filenode()
1042 1046 break
1043 1047
1044 1048 if crev:
1045 1049 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1046 1050 meta["copy"] = cfname
1047 1051 meta["copyrev"] = hex(crev)
1048 1052 fparent1, fparent2 = nullid, newfparent
1049 1053 else:
1050 1054 self.ui.warn(_("warning: can't find ancestor for '%s' "
1051 1055 "copied from '%s'!\n") % (fname, cfname))
1052 1056
1053 1057 elif fparent2 != nullid:
1054 1058 # is one parent an ancestor of the other?
1055 1059 fparentancestor = flog.ancestor(fparent1, fparent2)
1056 1060 if fparentancestor == fparent1:
1057 1061 fparent1, fparent2 = fparent2, nullid
1058 1062 elif fparentancestor == fparent2:
1059 1063 fparent2 = nullid
1060 1064
1061 1065 # is the file changed?
1062 1066 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1063 1067 changelist.append(fname)
1064 1068 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1065 1069
1066 1070 # are just the flags changed during merge?
1067 1071 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1068 1072 changelist.append(fname)
1069 1073
1070 1074 return fparent1
1071 1075
1072 1076 def commit(self, text="", user=None, date=None, match=None, force=False,
1073 1077 editor=False, extra={}):
1074 1078 """Add a new revision to current repository.
1075 1079
1076 1080 Revision information is gathered from the working directory,
1077 1081 match can be used to filter the committed files. If editor is
1078 1082 supplied, it is called to get a commit message.
1079 1083 """
1080 1084
1081 1085 def fail(f, msg):
1082 1086 raise util.Abort('%s: %s' % (f, msg))
1083 1087
1084 1088 if not match:
1085 1089 match = matchmod.always(self.root, '')
1086 1090
1087 1091 if not force:
1088 1092 vdirs = []
1089 1093 match.dir = vdirs.append
1090 1094 match.bad = fail
1091 1095
1092 1096 wlock = self.wlock()
1093 1097 try:
1094 1098 wctx = self[None]
1095 1099 merge = len(wctx.parents()) > 1
1096 1100
1097 1101 if (not force and merge and match and
1098 1102 (match.files() or match.anypats())):
1099 1103 raise util.Abort(_('cannot partially commit a merge '
1100 1104 '(do not specify files or patterns)'))
1101 1105
1102 1106 changes = self.status(match=match, clean=force)
1103 1107 if force:
1104 1108 changes[0].extend(changes[6]) # mq may commit unchanged files
1105 1109
1106 1110 # check subrepos
1107 1111 subs = []
1108 1112 commitsubs = set()
1109 1113 newstate = wctx.substate.copy()
1110 1114 # only manage subrepos and .hgsubstate if .hgsub is present
1111 1115 if '.hgsub' in wctx:
1112 1116 # we'll decide whether to track this ourselves, thanks
1113 1117 if '.hgsubstate' in changes[0]:
1114 1118 changes[0].remove('.hgsubstate')
1115 1119 if '.hgsubstate' in changes[2]:
1116 1120 changes[2].remove('.hgsubstate')
1117 1121
1118 1122 # compare current state to last committed state
1119 1123 # build new substate based on last committed state
1120 1124 oldstate = wctx.p1().substate
1121 1125 for s in sorted(newstate.keys()):
1122 1126 if not match(s):
1123 1127 # ignore working copy, use old state if present
1124 1128 if s in oldstate:
1125 1129 newstate[s] = oldstate[s]
1126 1130 continue
1127 1131 if not force:
1128 1132 raise util.Abort(
1129 1133 _("commit with new subrepo %s excluded") % s)
1130 1134 if wctx.sub(s).dirty(True):
1131 1135 if not self.ui.configbool('ui', 'commitsubrepos'):
1132 1136 raise util.Abort(
1133 1137 _("uncommitted changes in subrepo %s") % s,
1134 1138 hint=_("use --subrepos for recursive commit"))
1135 1139 subs.append(s)
1136 1140 commitsubs.add(s)
1137 1141 else:
1138 1142 bs = wctx.sub(s).basestate()
1139 1143 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1140 1144 if oldstate.get(s, (None, None, None))[1] != bs:
1141 1145 subs.append(s)
1142 1146
1143 1147 # check for removed subrepos
1144 1148 for p in wctx.parents():
1145 1149 r = [s for s in p.substate if s not in newstate]
1146 1150 subs += [s for s in r if match(s)]
1147 1151 if subs:
1148 1152 if (not match('.hgsub') and
1149 1153 '.hgsub' in (wctx.modified() + wctx.added())):
1150 1154 raise util.Abort(
1151 1155 _("can't commit subrepos without .hgsub"))
1152 1156 changes[0].insert(0, '.hgsubstate')
1153 1157
1154 1158 elif '.hgsub' in changes[2]:
1155 1159 # clean up .hgsubstate when .hgsub is removed
1156 1160 if ('.hgsubstate' in wctx and
1157 1161 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1158 1162 changes[2].insert(0, '.hgsubstate')
1159 1163
1160 1164 # make sure all explicit patterns are matched
1161 1165 if not force and match.files():
1162 1166 matched = set(changes[0] + changes[1] + changes[2])
1163 1167
1164 1168 for f in match.files():
1165 1169 if f == '.' or f in matched or f in wctx.substate:
1166 1170 continue
1167 1171 if f in changes[3]: # missing
1168 1172 fail(f, _('file not found!'))
1169 1173 if f in vdirs: # visited directory
1170 1174 d = f + '/'
1171 1175 for mf in matched:
1172 1176 if mf.startswith(d):
1173 1177 break
1174 1178 else:
1175 1179 fail(f, _("no match under directory!"))
1176 1180 elif f not in self.dirstate:
1177 1181 fail(f, _("file not tracked!"))
1178 1182
1179 1183 if (not force and not extra.get("close") and not merge
1180 1184 and not (changes[0] or changes[1] or changes[2])
1181 1185 and wctx.branch() == wctx.p1().branch()):
1182 1186 return None
1183 1187
1184 1188 ms = mergemod.mergestate(self)
1185 1189 for f in changes[0]:
1186 1190 if f in ms and ms[f] == 'u':
1187 1191 raise util.Abort(_("unresolved merge conflicts "
1188 1192 "(see hg help resolve)"))
1189 1193
1190 1194 cctx = context.workingctx(self, text, user, date, extra, changes)
1191 1195 if editor:
1192 1196 cctx._text = editor(self, cctx, subs)
1193 1197 edited = (text != cctx._text)
1194 1198
1195 1199 # commit subs and write new state
1196 1200 if subs:
1197 1201 for s in sorted(commitsubs):
1198 1202 sub = wctx.sub(s)
1199 1203 self.ui.status(_('committing subrepository %s\n') %
1200 1204 subrepo.subrelpath(sub))
1201 1205 sr = sub.commit(cctx._text, user, date)
1202 1206 newstate[s] = (newstate[s][0], sr)
1203 1207 subrepo.writestate(self, newstate)
1204 1208
1205 1209 # Save commit message in case this transaction gets rolled back
1206 1210 # (e.g. by a pretxncommit hook). Leave the content alone on
1207 1211 # the assumption that the user will use the same editor again.
1208 1212 msgfn = self.savecommitmessage(cctx._text)
1209 1213
1210 1214 p1, p2 = self.dirstate.parents()
1211 1215 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1212 1216 try:
1213 1217 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1214 1218 ret = self.commitctx(cctx, True)
1215 1219 except:
1216 1220 if edited:
1217 1221 self.ui.write(
1218 1222 _('note: commit message saved in %s\n') % msgfn)
1219 1223 raise
1220 1224
1221 1225 # update bookmarks, dirstate and mergestate
1222 1226 bookmarks.update(self, p1, ret)
1223 1227 for f in changes[0] + changes[1]:
1224 1228 self.dirstate.normal(f)
1225 1229 for f in changes[2]:
1226 1230 self.dirstate.drop(f)
1227 1231 self.dirstate.setparents(ret)
1228 1232 ms.reset()
1229 1233 finally:
1230 1234 wlock.release()
1231 1235
1232 1236 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1233 1237 return ret
1234 1238
1235 1239 def commitctx(self, ctx, error=False):
1236 1240 """Add a new revision to current repository.
1237 1241 Revision information is passed via the context argument.
1238 1242 """
1239 1243
1240 1244 tr = lock = None
1241 1245 removed = list(ctx.removed())
1242 1246 p1, p2 = ctx.p1(), ctx.p2()
1243 1247 user = ctx.user()
1244 1248
1245 1249 lock = self.lock()
1246 1250 try:
1247 1251 tr = self.transaction("commit")
1248 1252 trp = weakref.proxy(tr)
1249 1253
1250 1254 if ctx.files():
1251 1255 m1 = p1.manifest().copy()
1252 1256 m2 = p2.manifest()
1253 1257
1254 1258 # check in files
1255 1259 new = {}
1256 1260 changed = []
1257 1261 linkrev = len(self)
1258 1262 for f in sorted(ctx.modified() + ctx.added()):
1259 1263 self.ui.note(f + "\n")
1260 1264 try:
1261 1265 fctx = ctx[f]
1262 1266 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1263 1267 changed)
1264 1268 m1.set(f, fctx.flags())
1265 1269 except OSError, inst:
1266 1270 self.ui.warn(_("trouble committing %s!\n") % f)
1267 1271 raise
1268 1272 except IOError, inst:
1269 1273 errcode = getattr(inst, 'errno', errno.ENOENT)
1270 1274 if error or errcode and errcode != errno.ENOENT:
1271 1275 self.ui.warn(_("trouble committing %s!\n") % f)
1272 1276 raise
1273 1277 else:
1274 1278 removed.append(f)
1275 1279
1276 1280 # update manifest
1277 1281 m1.update(new)
1278 1282 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1279 1283 drop = [f for f in removed if f in m1]
1280 1284 for f in drop:
1281 1285 del m1[f]
1282 1286 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1283 1287 p2.manifestnode(), (new, drop))
1284 1288 files = changed + removed
1285 1289 else:
1286 1290 mn = p1.manifestnode()
1287 1291 files = []
1288 1292
1289 1293 # update changelog
1290 1294 self.changelog.delayupdate()
1291 1295 n = self.changelog.add(mn, files, ctx.description(),
1292 1296 trp, p1.node(), p2.node(),
1293 1297 user, ctx.date(), ctx.extra().copy())
1294 1298 p = lambda: self.changelog.writepending() and self.root or ""
1295 1299 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1296 1300 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1297 1301 parent2=xp2, pending=p)
1298 1302 self.changelog.finalize(trp)
1299 1303 # set the new commit in its proper phase
1300 1304 targetphase = phases.newcommitphase(self.ui)
1301 1305 if targetphase:
1302 1306 # retracting the boundary does not alter parent changesets.
1303 1307 # if a parent has a higher phase, the resulting phase will
1304 1308 # be compliant anyway
1305 1309 #
1306 1310 # if minimal phase was 0 we don't need to retract anything
1307 1311 phases.retractboundary(self, targetphase, [n])
1308 1312 tr.close()
1309 1313 self.updatebranchcache()
1310 1314 return n
1311 1315 finally:
1312 1316 if tr:
1313 1317 tr.release()
1314 1318 lock.release()
1315 1319
1316 1320 def destroyed(self):
1317 1321 '''Inform the repository that nodes have been destroyed.
1318 1322 Intended for use by strip and rollback, so there's a common
1319 1323 place for anything that has to be done after destroying history.'''
1320 1324 # XXX it might be nice if we could take the list of destroyed
1321 1325 # nodes, but I don't see an easy way for rollback() to do that
1322 1326
1323 1327 # Ensure the persistent tag cache is updated. Doing it now
1324 1328 # means that the tag cache only has to worry about destroyed
1325 1329 # heads immediately after a strip/rollback. That in turn
1326 1330 # guarantees that "cachetip == currenttip" (comparing both rev
1327 1331 # and node) always means no nodes have been added or destroyed.
1328 1332
1329 1333 # XXX this is suboptimal when qrefresh'ing: we strip the current
1330 1334 # head, refresh the tag cache, then immediately add a new head.
1331 1335 # But I think doing it this way is necessary for the "instant
1332 1336 # tag cache retrieval" case to work.
1333 1337 self.invalidatecaches()
1334 1338
1335 1339 # Discard all cache entries to force reloading everything.
1336 1340 self._filecache.clear()
1337 1341
1338 1342 def walk(self, match, node=None):
1339 1343 '''
1340 1344 walk recursively through the directory tree or a given
1341 1345 changeset, finding all files matched by the match
1342 1346 function
1343 1347 '''
1344 1348 return self[node].walk(match)
1345 1349
1346 1350 def status(self, node1='.', node2=None, match=None,
1347 1351 ignored=False, clean=False, unknown=False,
1348 1352 listsubrepos=False):
1349 1353 """return status of files between two nodes or node and working directory
1350 1354
1351 1355 If node1 is None, use the first dirstate parent instead.
1352 1356 If node2 is None, compare node1 with working directory.
1353 1357 """
1354 1358
1355 1359 def mfmatches(ctx):
1356 1360 mf = ctx.manifest().copy()
1357 1361 for fn in mf.keys():
1358 1362 if not match(fn):
1359 1363 del mf[fn]
1360 1364 return mf
1361 1365
1362 1366 if isinstance(node1, context.changectx):
1363 1367 ctx1 = node1
1364 1368 else:
1365 1369 ctx1 = self[node1]
1366 1370 if isinstance(node2, context.changectx):
1367 1371 ctx2 = node2
1368 1372 else:
1369 1373 ctx2 = self[node2]
1370 1374
1371 1375 working = ctx2.rev() is None
1372 1376 parentworking = working and ctx1 == self['.']
1373 1377 match = match or matchmod.always(self.root, self.getcwd())
1374 1378 listignored, listclean, listunknown = ignored, clean, unknown
1375 1379
1376 1380 # load earliest manifest first for caching reasons
1377 1381 if not working and ctx2.rev() < ctx1.rev():
1378 1382 ctx2.manifest()
1379 1383
1380 1384 if not parentworking:
1381 1385 def bad(f, msg):
1382 1386 # 'f' may be a directory pattern from 'match.files()',
1383 1387 # so 'f not in ctx1' is not enough
1384 1388 if f not in ctx1 and f not in ctx1.dirs():
1385 1389 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1386 1390 match.bad = bad
1387 1391
1388 1392 if working: # we need to scan the working dir
1389 1393 subrepos = []
1390 1394 if '.hgsub' in self.dirstate:
1391 1395 subrepos = ctx2.substate.keys()
1392 1396 s = self.dirstate.status(match, subrepos, listignored,
1393 1397 listclean, listunknown)
1394 1398 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1395 1399
1396 1400 # check for any possibly clean files
1397 1401 if parentworking and cmp:
1398 1402 fixup = []
1399 1403 # do a full compare of any files that might have changed
1400 1404 for f in sorted(cmp):
1401 1405 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1402 1406 or ctx1[f].cmp(ctx2[f])):
1403 1407 modified.append(f)
1404 1408 else:
1405 1409 fixup.append(f)
1406 1410
1407 1411 # update dirstate for files that are actually clean
1408 1412 if fixup:
1409 1413 if listclean:
1410 1414 clean += fixup
1411 1415
1412 1416 try:
1413 1417 # updating the dirstate is optional
1414 1418 # so we don't wait on the lock
1415 1419 wlock = self.wlock(False)
1416 1420 try:
1417 1421 for f in fixup:
1418 1422 self.dirstate.normal(f)
1419 1423 finally:
1420 1424 wlock.release()
1421 1425 except error.LockError:
1422 1426 pass
1423 1427
1424 1428 if not parentworking:
1425 1429 mf1 = mfmatches(ctx1)
1426 1430 if working:
1427 1431 # we are comparing working dir against non-parent
1428 1432 # generate a pseudo-manifest for the working dir
1429 1433 mf2 = mfmatches(self['.'])
1430 1434 for f in cmp + modified + added:
1431 1435 mf2[f] = None
1432 1436 mf2.set(f, ctx2.flags(f))
1433 1437 for f in removed:
1434 1438 if f in mf2:
1435 1439 del mf2[f]
1436 1440 else:
1437 1441 # we are comparing two revisions
1438 1442 deleted, unknown, ignored = [], [], []
1439 1443 mf2 = mfmatches(ctx2)
1440 1444
1441 1445 modified, added, clean = [], [], []
1442 1446 for fn in mf2:
1443 1447 if fn in mf1:
1444 1448 if (fn not in deleted and
1445 1449 (mf1.flags(fn) != mf2.flags(fn) or
1446 1450 (mf1[fn] != mf2[fn] and
1447 1451 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1448 1452 modified.append(fn)
1449 1453 elif listclean:
1450 1454 clean.append(fn)
1451 1455 del mf1[fn]
1452 1456 elif fn not in deleted:
1453 1457 added.append(fn)
1454 1458 removed = mf1.keys()
1455 1459
1456 1460 if working and modified and not self.dirstate._checklink:
1457 1461 # Symlink placeholders may get non-symlink-like contents
1458 1462 # via user error or dereferencing by NFS or Samba servers,
1459 1463 # so we filter out any placeholders that don't look like a
1460 1464 # symlink
1461 1465 sane = []
1462 1466 for f in modified:
1463 1467 if ctx2.flags(f) == 'l':
1464 1468 d = ctx2[f].data()
1465 1469 if len(d) >= 1024 or '\n' in d or util.binary(d):
1466 1470 self.ui.debug('ignoring suspect symlink placeholder'
1467 1471 ' "%s"\n' % f)
1468 1472 continue
1469 1473 sane.append(f)
1470 1474 modified = sane
1471 1475
1472 1476 r = modified, added, removed, deleted, unknown, ignored, clean
1473 1477
1474 1478 if listsubrepos:
1475 1479 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1476 1480 if working:
1477 1481 rev2 = None
1478 1482 else:
1479 1483 rev2 = ctx2.substate[subpath][1]
1480 1484 try:
1481 1485 submatch = matchmod.narrowmatcher(subpath, match)
1482 1486 s = sub.status(rev2, match=submatch, ignored=listignored,
1483 1487 clean=listclean, unknown=listunknown,
1484 1488 listsubrepos=True)
1485 1489 for rfiles, sfiles in zip(r, s):
1486 1490 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1487 1491 except error.LookupError:
1488 1492 self.ui.status(_("skipping missing subrepository: %s\n")
1489 1493 % subpath)
1490 1494
1491 1495 for l in r:
1492 1496 l.sort()
1493 1497 return r
1494 1498
1495 1499 def heads(self, start=None):
1496 1500 heads = self.changelog.heads(start)
1497 1501 # sort the output in rev descending order
1498 1502 return sorted(heads, key=self.changelog.rev, reverse=True)
1499 1503
1500 1504 def branchheads(self, branch=None, start=None, closed=False):
1501 1505 '''return a (possibly filtered) list of heads for the given branch
1502 1506
1503 1507 Heads are returned in topological order, from newest to oldest.
1504 1508 If branch is None, use the dirstate branch.
1505 1509 If start is not None, return only heads reachable from start.
1506 1510 If closed is True, return heads that are marked as closed as well.
1507 1511 '''
1508 1512 if branch is None:
1509 1513 branch = self[None].branch()
1510 1514 branches = self.branchmap()
1511 1515 if branch not in branches:
1512 1516 return []
1513 1517 # the cache returns heads ordered lowest to highest
1514 1518 bheads = list(reversed(branches[branch]))
1515 1519 if start is not None:
1516 1520 # filter out the heads that cannot be reached from startrev
1517 1521 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1518 1522 bheads = [h for h in bheads if h in fbheads]
1519 1523 if not closed:
1520 1524 bheads = [h for h in bheads if
1521 1525 ('close' not in self.changelog.read(h)[5])]
1522 1526 return bheads
1523 1527
1524 1528 def branches(self, nodes):
1525 1529 if not nodes:
1526 1530 nodes = [self.changelog.tip()]
1527 1531 b = []
1528 1532 for n in nodes:
1529 1533 t = n
1530 1534 while True:
1531 1535 p = self.changelog.parents(n)
1532 1536 if p[1] != nullid or p[0] == nullid:
1533 1537 b.append((t, n, p[0], p[1]))
1534 1538 break
1535 1539 n = p[0]
1536 1540 return b
1537 1541
1538 1542 def between(self, pairs):
1539 1543 r = []
1540 1544
1541 1545 for top, bottom in pairs:
1542 1546 n, l, i = top, [], 0
1543 1547 f = 1
1544 1548
1545 1549 while n != bottom and n != nullid:
1546 1550 p = self.changelog.parents(n)[0]
1547 1551 if i == f:
1548 1552 l.append(n)
1549 1553 f = f * 2
1550 1554 n = p
1551 1555 i += 1
1552 1556
1553 1557 r.append(l)
1554 1558
1555 1559 return r
1556 1560
1557 1561 def pull(self, remote, heads=None, force=False):
1558 1562 lock = self.lock()
1559 1563 try:
1560 1564 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1561 1565 force=force)
1562 1566 common, fetch, rheads = tmp
1563 1567 if not fetch:
1564 1568 self.ui.status(_("no changes found\n"))
1565 1569 added = []
1566 1570 result = 0
1567 1571 else:
1568 1572 if heads is None and list(common) == [nullid]:
1569 1573 self.ui.status(_("requesting all changes\n"))
1570 1574 elif heads is None and remote.capable('changegroupsubset'):
1571 1575 # issue1320, avoid a race if remote changed after discovery
1572 1576 heads = rheads
1573 1577
1574 1578 if remote.capable('getbundle'):
1575 1579 cg = remote.getbundle('pull', common=common,
1576 1580 heads=heads or rheads)
1577 1581 elif heads is None:
1578 1582 cg = remote.changegroup(fetch, 'pull')
1579 1583 elif not remote.capable('changegroupsubset'):
1580 1584 raise util.Abort(_("partial pull cannot be done because "
1581 1585 "other repository doesn't support "
1582 1586 "changegroupsubset."))
1583 1587 else:
1584 1588 cg = remote.changegroupsubset(fetch, heads, 'pull')
1585 1589 clstart = len(self.changelog)
1586 1590 result = self.addchangegroup(cg, 'pull', remote.url())
1587 1591 clend = len(self.changelog)
1588 1592 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1589 1593
1590 1594 # compute target subset
1591 1595 if heads is None:
1592 1596 # We pulled everything possible
1593 1597 # sync on everything common
1594 1598 subset = common + added
1595 1599 else:
1596 1600 # We pulled a specific subset
1597 1601 # sync on this subset
1598 1602 subset = heads
1599 1603
1600 1604 # Get remote phases data from remote
1601 1605 remotephases = remote.listkeys('phases')
1602 1606 publishing = bool(remotephases.get('publishing', False))
1603 1607 if remotephases and not publishing:
1604 1608 # remote is new and non-publishing
1605 1609 pheads, _dr = phases.analyzeremotephases(self, subset,
1606 1610 remotephases)
1607 1611 phases.advanceboundary(self, phases.public, pheads)
1608 1612 phases.advanceboundary(self, phases.draft, subset)
1609 1613 else:
1610 1614 # Remote is old or publishing; all common
1611 1615 # changesets should be seen as public
1612 1616 phases.advanceboundary(self, phases.public, subset)
1613 1617 finally:
1614 1618 lock.release()
1615 1619
1616 1620 return result
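
A hedged sketch of driving pull() directly; the paths and URL are
hypothetical, and hg.repository() is assumed to resolve remote URLs as it
did in this era:

    from mercurial import ui, hg

    local = hg.repository(ui.ui(), '/path/to/local')
    remote = hg.repository(local.ui, 'http://example.com/hg')
    result = local.pull(remote)  # 0 when no changes were found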
1617 1621
1618 1622 def checkpush(self, force, revs):
1619 1623 """Extensions can override this function if additional checks have
1620 1624 to be performed before pushing, or call it if they override push
1621 1625 command.
1622 1626 """
1623 1627 pass
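
A sketch of the extension hook point this docstring describes; the
vetopush extension name and its config knob are hypothetical:

    from mercurial import util
    from mercurial.i18n import _

    def reposetup(ui, repo):
        class vetorepo(repo.__class__):
            def checkpush(self, force, revs):
                super(vetorepo, self).checkpush(force, revs)
                if not force and self.ui.configbool('vetopush', 'enabled'):
                    raise util.Abort(_('pushing is disabled by policy'))
        repo.__class__ = vetorepo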
1624 1628
1625 1629 def push(self, remote, force=False, revs=None, newbranch=False):
1626 1630 '''Push outgoing changesets (limited by revs) from the current
1627 1631 repository to remote. Return an integer:
1628 1632 - None means nothing to push
1629 1633 - 0 means HTTP error
1630 1634 - 1 means we pushed and remote head count is unchanged *or*
1631 1635 we have outgoing changesets but refused to push
1632 1636 - other values as described by addchangegroup()
1633 1637 '''
1634 1638 # there are two ways to push to remote repo:
1635 1639 #
1636 1640 # addchangegroup assumes local user can lock remote
1637 1641 # repo (local filesystem, old ssh servers).
1638 1642 #
1639 1643 # unbundle assumes local user cannot lock remote repo (new ssh
1640 1644 # servers, http servers).
1641 1645
1642 1646 # get local lock as we might write phase data
1643 1647 locallock = self.lock()
1644 1648 try:
1645 1649 self.checkpush(force, revs)
1646 1650 lock = None
1647 1651 unbundle = remote.capable('unbundle')
1648 1652 if not unbundle:
1649 1653 lock = remote.lock()
1650 1654 try:
1651 1655 # discovery
1652 1656 fci = discovery.findcommonincoming
1653 1657 commoninc = fci(self, remote, force=force)
1654 1658 common, inc, remoteheads = commoninc
1655 1659 fco = discovery.findcommonoutgoing
1656 1660 outgoing = fco(self, remote, onlyheads=revs,
1657 1661 commoninc=commoninc, force=force)
1658 1662
1660 1664 if not outgoing.missing:
1661 1665 # nothing to push
1662 1666 scmutil.nochangesfound(self.ui, outgoing.excluded)
1663 1667 ret = None
1664 1668 else:
1665 1669 # something to push
1666 1670 if not force:
1667 1671 discovery.checkheads(self, remote, outgoing,
1668 1672 remoteheads, newbranch,
1669 1673 bool(inc))
1670 1674
1671 1675 # create a changegroup from local
1672 1676 if revs is None and not outgoing.excluded:
1673 1677 # push everything,
1674 1678 # use the fast path, no race possible on push
1675 1679 cg = self._changegroup(outgoing.missing, 'push')
1676 1680 else:
1677 1681 cg = self.getlocalbundle('push', outgoing)
1678 1682
1679 1683 # apply changegroup to remote
1680 1684 if unbundle:
1681 1685 # the local repo finds heads on the server, and figures out
1682 1686 # which revs it must push. once the revs are transferred, if
1683 1687 # the server finds it has different heads (someone else won
1684 1688 # the commit/push race), it aborts.
1685 1689 if force:
1686 1690 remoteheads = ['force']
1687 1691 # ssh: return remote's addchangegroup()
1688 1692 # http: return remote's addchangegroup() or 0 for error
1689 1693 ret = remote.unbundle(cg, remoteheads, 'push')
1690 1694 else:
1691 1695 # we return an integer indicating remote head count change
1692 1696 ret = remote.addchangegroup(cg, 'push', self.url())
1693 1697
1694 1698 if ret:
1695 1699 # push succeeded, synchronize the target of the push
1696 1700 cheads = outgoing.missingheads
1697 1701 elif revs is None:
1698 1702 # All-out push failed; synchronize on everything common
1699 1703 cheads = outgoing.commonheads
1700 1704 else:
1701 1705 # I want cheads = heads(::missingheads and ::commonheads)
1702 1706 # (missingheads is revs with secret changesets filtered out)
1703 1707 #
1704 1708 # This can be expressed as:
1705 1709 # cheads = ( (missingheads and ::commonheads)
1706 1710 #              + (commonheads and ::missingheads))
1708 1712 #
1709 1713 # while trying to push we already computed the following:
1710 1714 # common = (::commonheads)
1711 1715 # missing = ((commonheads::missingheads) - commonheads)
1712 1716 #
1713 1717 # We can pick:
1714 1718 # * missingheads part of common (::commonheads)
1715 1719 common = set(outgoing.common)
1716 1720 cheads = [node for node in revs if node in common]
1717 1721 # and
1718 1722 # * commonheads that are parents of roots(missing)
1719 1723 revset = self.set('%ln and parents(roots(%ln))',
1720 1724 outgoing.commonheads,
1721 1725 outgoing.missing)
1722 1726 cheads.extend(c.node() for c in revset)
1723 1727 # even when we don't push, exchanging phase data is useful
1724 1728 remotephases = remote.listkeys('phases')
1725 1729 if not remotephases: # old server or public only repo
1726 1730 phases.advanceboundary(self, phases.public, cheads)
1727 1731 # don't push any phase data as there is nothing to push
1728 1732 else:
1729 1733 ana = phases.analyzeremotephases(self, cheads, remotephases)
1730 1734 pheads, droots = ana
1731 1735 ### Apply remote phase on local
1732 1736 if remotephases.get('publishing', False):
1733 1737 phases.advanceboundary(self, phases.public, cheads)
1734 1738 else: # publish = False
1735 1739 phases.advanceboundary(self, phases.public, pheads)
1736 1740 phases.advanceboundary(self, phases.draft, cheads)
1737 1741 ### Apply local phase on remote
1738 1742
1739 1743 # Get the list of all revs that are draft on the remote but public here.
1740 1744 # XXX Beware that the revset breaks if droots are not strictly
1741 1745 # XXX roots; we may want to ensure they are, but that is costly
1742 1746 outdated = self.set('heads((%ln::%ln) and public())',
1743 1747 droots, cheads)
1744 1748 for newremotehead in outdated:
1745 1749 r = remote.pushkey('phases',
1746 1750 newremotehead.hex(),
1747 1751 str(phases.draft),
1748 1752 str(phases.public))
1749 1753 if not r:
1750 1754 self.ui.warn(_('updating %s to public failed!\n')
1751 1755 % newremotehead)
1752 1756 finally:
1753 1757 if lock is not None:
1754 1758 lock.release()
1755 1759 finally:
1756 1760 locallock.release()
1757 1761
1758 1762 self.ui.debug("checking for updated bookmarks\n")
1759 1763 rb = remote.listkeys('bookmarks')
1760 1764 for k in rb.keys():
1761 1765 if k in self._bookmarks:
1762 1766 nr, nl = rb[k], hex(self._bookmarks[k])
1763 1767 if nr in self:
1764 1768 cr = self[nr]
1765 1769 cl = self[nl]
1766 1770 if cl in cr.descendants():
1767 1771 r = remote.pushkey('bookmarks', k, nr, nl)
1768 1772 if r:
1769 1773 self.ui.status(_("updating bookmark %s\n") % k)
1770 1774 else:
1771 1775 self.ui.warn(_('updating bookmark %s'
1772 1776 ' failed!\n') % k)
1773 1777
1774 1778 return ret
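
A sketch of interpreting the return values enumerated in the docstring
above; local and remote are repository objects as in the pull sketch:

    ret = local.push(remote, newbranch=True)
    if ret is None:
        local.ui.status('nothing to push\n')
    elif ret == 0:
        local.ui.warn('push failed (HTTP error)\n')
    elif ret == 1:
        local.ui.status('pushed with no new remote heads, or push refused\n')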
1775 1779
1776 1780 def changegroupinfo(self, nodes, source):
1777 1781 if self.ui.verbose or source == 'bundle':
1778 1782 self.ui.status(_("%d changesets found\n") % len(nodes))
1779 1783 if self.ui.debugflag:
1780 1784 self.ui.debug("list of changesets:\n")
1781 1785 for node in nodes:
1782 1786 self.ui.debug("%s\n" % hex(node))
1783 1787
1784 1788 def changegroupsubset(self, bases, heads, source):
1785 1789 """Compute a changegroup consisting of all the nodes that are
1786 1790 descendants of any of the bases and ancestors of any of the heads.
1787 1791 Return a chunkbuffer object whose read() method will return
1788 1792 successive changegroup chunks.
1789 1793
1790 1794 It is fairly complex as determining which filenodes and which
1791 1795 manifest nodes need to be included for the changeset to be complete
1792 1796 is non-trivial.
1793 1797
1794 1798 Another wrinkle is doing the reverse, figuring out which changeset in
1795 1799 the changegroup a particular filenode or manifestnode belongs to.
1796 1800 """
1797 1801 cl = self.changelog
1798 1802 if not bases:
1799 1803 bases = [nullid]
1800 1804 csets, bases, heads = cl.nodesbetween(bases, heads)
1801 1805 # We assume that all ancestors of bases are known
1802 1806 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1803 1807 return self._changegroupsubset(common, csets, heads, source)
1804 1808
1805 1809 def getlocalbundle(self, source, outgoing):
1806 1810 """Like getbundle, but taking a discovery.outgoing as an argument.
1807 1811
1808 1812 This is only implemented for local repos and reuses potentially
1809 1813 precomputed sets in outgoing."""
1810 1814 if not outgoing.missing:
1811 1815 return None
1812 1816 return self._changegroupsubset(outgoing.common,
1813 1817 outgoing.missing,
1814 1818 outgoing.missingheads,
1815 1819 source)
1816 1820
1817 1821 def getbundle(self, source, heads=None, common=None):
1818 1822 """Like changegroupsubset, but returns the set difference between the
1819 1823 ancestors of heads and the ancestors of common.
1820 1824
1821 1825 If heads is None, use the local heads. If common is None, use [nullid].
1822 1826
1823 1827 The nodes in common might not all be known locally due to the way the
1824 1828 current discovery protocol works.
1825 1829 """
1826 1830 cl = self.changelog
1827 1831 if common:
1828 1832 nm = cl.nodemap
1829 1833 common = [n for n in common if n in nm]
1830 1834 else:
1831 1835 common = [nullid]
1832 1836 if not heads:
1833 1837 heads = cl.heads()
1834 1838 return self.getlocalbundle(source,
1835 1839 discovery.outgoing(cl, common, heads))
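
Given the defaults described in the docstring, these two calls should
produce the same changegroup; a sketch assuming repo is a localrepository:

    from mercurial import discovery
    from mercurial.node import nullid

    cg1 = repo.getbundle('pull', heads=repo.heads(), common=[nullid])
    cg2 = repo.getlocalbundle('pull',
                              discovery.outgoing(repo.changelog,
                                                 [nullid], repo.heads()))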
1836 1840
1837 1841 def _changegroupsubset(self, commonrevs, csets, heads, source):
1838 1842
1839 1843 cl = self.changelog
1840 1844 mf = self.manifest
1841 1845 mfs = {} # needed manifests
1842 1846 fnodes = {} # needed file nodes
1843 1847 changedfiles = set()
1844 1848 fstate = ['', {}]
1845 1849 count = [0]
1846 1850
1847 1851 # can we go through the fast path?
1848 1852 heads.sort()
1849 1853 if heads == sorted(self.heads()):
1850 1854 return self._changegroup(csets, source)
1851 1855
1852 1856 # slow path
1853 1857 self.hook('preoutgoing', throw=True, source=source)
1854 1858 self.changegroupinfo(csets, source)
1855 1859
1856 1860 # filter any nodes that claim to be part of the known set
1857 1861 def prune(revlog, missing):
1858 1862 return [n for n in missing
1859 1863 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1860 1864
1861 1865 def lookup(revlog, x):
1862 1866 if revlog == cl:
1863 1867 c = cl.read(x)
1864 1868 changedfiles.update(c[3])
1865 1869 mfs.setdefault(c[0], x)
1866 1870 count[0] += 1
1867 1871 self.ui.progress(_('bundling'), count[0],
1868 1872 unit=_('changesets'), total=len(csets))
1869 1873 return x
1870 1874 elif revlog == mf:
1871 1875 clnode = mfs[x]
1872 1876 mdata = mf.readfast(x)
1873 1877 for f in changedfiles:
1874 1878 if f in mdata:
1875 1879 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1876 1880 count[0] += 1
1877 1881 self.ui.progress(_('bundling'), count[0],
1878 1882 unit=_('manifests'), total=len(mfs))
1879 1883 return mfs[x]
1880 1884 else:
1881 1885 self.ui.progress(
1882 1886 _('bundling'), count[0], item=fstate[0],
1883 1887 unit=_('files'), total=len(changedfiles))
1884 1888 return fstate[1][x]
1885 1889
1886 1890 bundler = changegroup.bundle10(lookup)
1887 1891 reorder = self.ui.config('bundle', 'reorder', 'auto')
1888 1892 if reorder == 'auto':
1889 1893 reorder = None
1890 1894 else:
1891 1895 reorder = util.parsebool(reorder)
1892 1896
1893 1897 def gengroup():
1894 1898 # Create a changenode group generator that will call our functions
1895 1899 # back to look up the owning changenode and collect information.
1896 1900 for chunk in cl.group(csets, bundler, reorder=reorder):
1897 1901 yield chunk
1898 1902 self.ui.progress(_('bundling'), None)
1899 1903
1900 1904 # Create a generator for the manifestnodes that calls our lookup
1901 1905 # and data collection functions back.
1902 1906 count[0] = 0
1903 1907 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1904 1908 yield chunk
1905 1909 self.ui.progress(_('bundling'), None)
1906 1910
1907 1911 mfs.clear()
1908 1912
1909 1913 # Go through all our files in order sorted by name.
1910 1914 count[0] = 0
1911 1915 for fname in sorted(changedfiles):
1912 1916 filerevlog = self.file(fname)
1913 1917 if not len(filerevlog):
1914 1918 raise util.Abort(_("empty or missing revlog for %s") % fname)
1915 1919 fstate[0] = fname
1916 1920 fstate[1] = fnodes.pop(fname, {})
1917 1921
1918 1922 nodelist = prune(filerevlog, fstate[1])
1919 1923 if nodelist:
1920 1924 count[0] += 1
1921 1925 yield bundler.fileheader(fname)
1922 1926 for chunk in filerevlog.group(nodelist, bundler, reorder):
1923 1927 yield chunk
1924 1928
1925 1929 # Signal that no more groups are left.
1926 1930 yield bundler.close()
1927 1931 self.ui.progress(_('bundling'), None)
1928 1932
1929 1933 if csets:
1930 1934 self.hook('outgoing', node=hex(csets[0]), source=source)
1931 1935
1932 1936 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1933 1937
1934 1938 def changegroup(self, basenodes, source):
1935 1939 # to avoid a race we use changegroupsubset() (issue1320)
1936 1940 return self.changegroupsubset(basenodes, self.heads(), source)
1937 1941
1938 1942 def _changegroup(self, nodes, source):
1939 1943 """Compute the changegroup of all nodes that we have that a recipient
1940 1944 doesn't. Return a chunkbuffer object whose read() method will return
1941 1945 successive changegroup chunks.
1942 1946
1943 1947 This is much easier than the previous function as we can assume that
1944 1948 the recipient has any changenode we aren't sending them.
1945 1949
1946 1950 nodes is the set of nodes to send"""
1947 1951
1948 1952 cl = self.changelog
1949 1953 mf = self.manifest
1950 1954 mfs = {}
1951 1955 changedfiles = set()
1952 1956 fstate = ['']
1953 1957 count = [0]
1954 1958
1955 1959 self.hook('preoutgoing', throw=True, source=source)
1956 1960 self.changegroupinfo(nodes, source)
1957 1961
1958 1962 revset = set([cl.rev(n) for n in nodes])
1959 1963
1960 1964 def gennodelst(log):
1961 1965 return [log.node(r) for r in log if log.linkrev(r) in revset]
1962 1966
1963 1967 def lookup(revlog, x):
1964 1968 if revlog == cl:
1965 1969 c = cl.read(x)
1966 1970 changedfiles.update(c[3])
1967 1971 mfs.setdefault(c[0], x)
1968 1972 count[0] += 1
1969 1973 self.ui.progress(_('bundling'), count[0],
1970 1974 unit=_('changesets'), total=len(nodes))
1971 1975 return x
1972 1976 elif revlog == mf:
1973 1977 count[0] += 1
1974 1978 self.ui.progress(_('bundling'), count[0],
1975 1979 unit=_('manifests'), total=len(mfs))
1976 1980 return cl.node(revlog.linkrev(revlog.rev(x)))
1977 1981 else:
1978 1982 self.ui.progress(
1979 1983 _('bundling'), count[0], item=fstate[0],
1980 1984 total=len(changedfiles), unit=_('files'))
1981 1985 return cl.node(revlog.linkrev(revlog.rev(x)))
1982 1986
1983 1987 bundler = changegroup.bundle10(lookup)
1984 1988 reorder = self.ui.config('bundle', 'reorder', 'auto')
1985 1989 if reorder == 'auto':
1986 1990 reorder = None
1987 1991 else:
1988 1992 reorder = util.parsebool(reorder)
1989 1993
1990 1994 def gengroup():
1991 1995 '''yield a sequence of changegroup chunks (strings)'''
1992 1996 # construct a list of all changed files
1993 1997
1994 1998 for chunk in cl.group(nodes, bundler, reorder=reorder):
1995 1999 yield chunk
1996 2000 self.ui.progress(_('bundling'), None)
1997 2001
1998 2002 count[0] = 0
1999 2003 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2000 2004 yield chunk
2001 2005 self.ui.progress(_('bundling'), None)
2002 2006
2003 2007 count[0] = 0
2004 2008 for fname in sorted(changedfiles):
2005 2009 filerevlog = self.file(fname)
2006 2010 if not len(filerevlog):
2007 2011 raise util.Abort(_("empty or missing revlog for %s") % fname)
2008 2012 fstate[0] = fname
2009 2013 nodelist = gennodelst(filerevlog)
2010 2014 if nodelist:
2011 2015 count[0] += 1
2012 2016 yield bundler.fileheader(fname)
2013 2017 for chunk in filerevlog.group(nodelist, bundler, reorder):
2014 2018 yield chunk
2015 2019 yield bundler.close()
2016 2020 self.ui.progress(_('bundling'), None)
2017 2021
2018 2022 if nodes:
2019 2023 self.hook('outgoing', node=hex(nodes[0]), source=source)
2020 2024
2021 2025 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2022 2026
2023 2027 def addchangegroup(self, source, srctype, url, emptyok=False):
2024 2028 """Add the changegroup returned by source.read() to this repo.
2025 2029 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2026 2030 the URL of the repo where this changegroup is coming from.
2027 2031
2028 2032 Return an integer summarizing the change to this repo:
2029 2033 - nothing changed or no source: 0
2030 2034 - more heads than before: 1+added heads (2..n)
2031 2035 - fewer heads than before: -1-removed heads (-2..-n)
2032 2036 - number of heads stays the same: 1
2033 2037 """
2034 2038 def csmap(x):
2035 2039 self.ui.debug("add changeset %s\n" % short(x))
2036 2040 return len(cl)
2037 2041
2038 2042 def revmap(x):
2039 2043 return cl.rev(x)
2040 2044
2041 2045 if not source:
2042 2046 return 0
2043 2047
2044 2048 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2045 2049
2046 2050 changesets = files = revisions = 0
2047 2051 efiles = set()
2048 2052
2049 2053 # write changelog data to temp files so concurrent readers
2050 2054 # will not see an inconsistent view
2051 2055 cl = self.changelog
2052 2056 cl.delayupdate()
2053 2057 oldheads = cl.heads()
2054 2058
2055 2059 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2056 2060 try:
2057 2061 trp = weakref.proxy(tr)
2058 2062 # pull off the changeset group
2059 2063 self.ui.status(_("adding changesets\n"))
2060 2064 clstart = len(cl)
2061 2065 class prog(object):
2062 2066 step = _('changesets')
2063 2067 count = 1
2064 2068 ui = self.ui
2065 2069 total = None
2066 2070 def __call__(self):
2067 2071 self.ui.progress(self.step, self.count, unit=_('chunks'),
2068 2072 total=self.total)
2069 2073 self.count += 1
2070 2074 pr = prog()
2071 2075 source.callback = pr
2072 2076
2073 2077 source.changelogheader()
2074 2078 srccontent = cl.addgroup(source, csmap, trp)
2075 2079 if not (srccontent or emptyok):
2076 2080 raise util.Abort(_("received changelog group is empty"))
2077 2081 clend = len(cl)
2078 2082 changesets = clend - clstart
2079 2083 for c in xrange(clstart, clend):
2080 2084 efiles.update(self[c].files())
2081 2085 efiles = len(efiles)
2082 2086 self.ui.progress(_('changesets'), None)
2083 2087
2084 2088 # pull off the manifest group
2085 2089 self.ui.status(_("adding manifests\n"))
2086 2090 pr.step = _('manifests')
2087 2091 pr.count = 1
2088 2092 pr.total = changesets # manifests <= changesets
2089 2093 # no need to check for empty manifest group here:
2090 2094 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2091 2095 # no new manifest will be created and the manifest group will
2092 2096 # be empty during the pull
2093 2097 source.manifestheader()
2094 2098 self.manifest.addgroup(source, revmap, trp)
2095 2099 self.ui.progress(_('manifests'), None)
2096 2100
2097 2101 needfiles = {}
2098 2102 if self.ui.configbool('server', 'validate', default=False):
2099 2103 # validate incoming csets have their manifests
2100 2104 for cset in xrange(clstart, clend):
2101 2105 mfest = self.changelog.read(self.changelog.node(cset))[0]
2102 2106 mfest = self.manifest.readdelta(mfest)
2103 2107 # store file nodes we must see
2104 2108 for f, n in mfest.iteritems():
2105 2109 needfiles.setdefault(f, set()).add(n)
2106 2110
2107 2111 # process the files
2108 2112 self.ui.status(_("adding file changes\n"))
2109 2113 pr.step = _('files')
2110 2114 pr.count = 1
2111 2115 pr.total = efiles
2112 2116 source.callback = None
2113 2117
2114 2118 while True:
2115 2119 chunkdata = source.filelogheader()
2116 2120 if not chunkdata:
2117 2121 break
2118 2122 f = chunkdata["filename"]
2119 2123 self.ui.debug("adding %s revisions\n" % f)
2120 2124 pr()
2121 2125 fl = self.file(f)
2122 2126 o = len(fl)
2123 2127 if not fl.addgroup(source, revmap, trp):
2124 2128 raise util.Abort(_("received file revlog group is empty"))
2125 2129 revisions += len(fl) - o
2126 2130 files += 1
2127 2131 if f in needfiles:
2128 2132 needs = needfiles[f]
2129 2133 for new in xrange(o, len(fl)):
2130 2134 n = fl.node(new)
2131 2135 if n in needs:
2132 2136 needs.remove(n)
2133 2137 if not needs:
2134 2138 del needfiles[f]
2135 2139 self.ui.progress(_('files'), None)
2136 2140
2137 2141 for f, needs in needfiles.iteritems():
2138 2142 fl = self.file(f)
2139 2143 for n in needs:
2140 2144 try:
2141 2145 fl.rev(n)
2142 2146 except error.LookupError:
2143 2147 raise util.Abort(
2144 2148 _('missing file data for %s:%s - run hg verify') %
2145 2149 (f, hex(n)))
2146 2150
2147 2151 dh = 0
2148 2152 if oldheads:
2149 2153 heads = cl.heads()
2150 2154 dh = len(heads) - len(oldheads)
2151 2155 for h in heads:
2152 2156 if h not in oldheads and 'close' in self[h].extra():
2153 2157 dh -= 1
2154 2158 htext = ""
2155 2159 if dh:
2156 2160 htext = _(" (%+d heads)") % dh
2157 2161
2158 2162 self.ui.status(_("added %d changesets"
2159 2163 " with %d changes to %d files%s\n")
2160 2164 % (changesets, revisions, files, htext))
2161 2165
2162 2166 if changesets > 0:
2163 2167 p = lambda: cl.writepending() and self.root or ""
2164 2168 self.hook('pretxnchangegroup', throw=True,
2165 2169 node=hex(cl.node(clstart)), source=srctype,
2166 2170 url=url, pending=p)
2167 2171
2168 2172 added = [cl.node(r) for r in xrange(clstart, clend)]
2169 2173 publishing = self.ui.configbool('phases', 'publish', True)
2170 2174 if srctype == 'push':
2171 2175 # Old servers cannot push the boundary themselves.
2172 2176 # New servers won't push the boundary if the changeset already
2173 2177 # existed locally as secret
2174 2178 #
2175 2179 # We should not use added here but the list of all changes in
2176 2180 # the bundle
2177 2181 if publishing:
2178 2182 phases.advanceboundary(self, phases.public, srccontent)
2179 2183 else:
2180 2184 phases.advanceboundary(self, phases.draft, srccontent)
2181 2185 phases.retractboundary(self, phases.draft, added)
2182 2186 elif srctype != 'strip':
2183 2187 # publishing only alters behavior during push
2184 2188 #
2185 2189 # strip should not touch boundary at all
2186 2190 phases.retractboundary(self, phases.draft, added)
2187 2191
2188 2192 # make changelog see real files again
2189 2193 cl.finalize(trp)
2190 2194
2191 2195 tr.close()
2192 2196
2193 2197 if changesets > 0:
2194 2198 def runhooks():
2195 2199 # forcefully update the on-disk branch cache
2196 2200 self.ui.debug("updating the branch cache\n")
2197 2201 self.updatebranchcache()
2198 2202 self.hook("changegroup", node=hex(cl.node(clstart)),
2199 2203 source=srctype, url=url)
2200 2204
2201 2205 for n in added:
2202 2206 self.hook("incoming", node=hex(n), source=srctype,
2203 2207 url=url)
2204 2208 self._afterlock(runhooks)
2205 2209
2206 2210 finally:
2207 2211 tr.release()
2208 2212 # never return 0 here:
2209 2213 if dh < 0:
2210 2214 return dh - 1
2211 2215 else:
2212 2216 return dh + 1
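
The final return statement shifts the head delta away from zero so callers
can distinguish "no change" from failure; a standalone sketch of decoding
it back:

    def decode_headchange(ret):
        # invert the "never return 0" encoding above
        if ret > 0:
            return ret - 1  # heads added (0 means count unchanged)
        return ret + 1      # heads removed

    assert decode_headchange(1) == 0
    assert decode_headchange(3) == 2
    assert decode_headchange(-2) == -1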
2213 2217
2214 2218 def stream_in(self, remote, requirements):
2215 2219 lock = self.lock()
2216 2220 try:
2217 2221 fp = remote.stream_out()
2218 2222 l = fp.readline()
2219 2223 try:
2220 2224 resp = int(l)
2221 2225 except ValueError:
2222 2226 raise error.ResponseError(
2223 2227 _('Unexpected response from remote server:'), l)
2224 2228 if resp == 1:
2225 2229 raise util.Abort(_('operation forbidden by server'))
2226 2230 elif resp == 2:
2227 2231 raise util.Abort(_('locking the remote repository failed'))
2228 2232 elif resp != 0:
2229 2233 raise util.Abort(_('the server sent an unknown error code'))
2230 2234 self.ui.status(_('streaming all changes\n'))
2231 2235 l = fp.readline()
2232 2236 try:
2233 2237 total_files, total_bytes = map(int, l.split(' ', 1))
2234 2238 except (ValueError, TypeError):
2235 2239 raise error.ResponseError(
2236 2240 _('Unexpected response from remote server:'), l)
2237 2241 self.ui.status(_('%d files to transfer, %s of data\n') %
2238 2242 (total_files, util.bytecount(total_bytes)))
2239 2243 start = time.time()
2240 2244 for i in xrange(total_files):
2241 2245 # XXX doesn't support '\n' or '\r' in filenames
2242 2246 l = fp.readline()
2243 2247 try:
2244 2248 name, size = l.split('\0', 1)
2245 2249 size = int(size)
2246 2250 except (ValueError, TypeError):
2247 2251 raise error.ResponseError(
2248 2252 _('Unexpected response from remote server:'), l)
2249 2253 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2250 2254 # for backwards compat, name was partially encoded
2251 2255 ofp = self.sopener(store.decodedir(name), 'w')
2252 2256 for chunk in util.filechunkiter(fp, limit=size):
2253 2257 ofp.write(chunk)
2254 2258 ofp.close()
2255 2259 elapsed = time.time() - start
2256 2260 if elapsed <= 0:
2257 2261 elapsed = 0.001
2258 2262 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2259 2263 (util.bytecount(total_bytes), elapsed,
2260 2264 util.bytecount(total_bytes / elapsed)))
2261 2265
2262 2266 # new requirements = old non-format requirements + new format-related
2263 2267 # requirements from the streamed-in repository
2264 2268 requirements.update(set(self.requirements) - self.supportedformats)
2265 2269 self._applyrequirements(requirements)
2266 2270 self._writerequirements()
2267 2271
2268 2272 self.invalidate()
2269 2273 return len(self.heads()) + 1
2270 2274 finally:
2271 2275 lock.release()
2272 2276
2273 2277 def clone(self, remote, heads=[], stream=False):
2274 2278 '''clone remote repository.
2275 2279
2276 2280 keyword arguments:
2277 2281 heads: list of revs to clone (forces use of pull)
2278 2282 stream: use streaming clone if possible'''
2279 2283
2280 2284 # now, all clients that can request uncompressed clones can
2281 2285 # read repo formats supported by all servers that can serve
2282 2286 # them.
2283 2287
2284 2288 # if revlog format changes, client will have to check version
2285 2289 # and format flags on "stream" capability, and use
2286 2290 # uncompressed only if compatible.
2287 2291
2288 2292 if stream and not heads:
2289 2293 # 'stream' means remote revlog format is revlogv1 only
2290 2294 if remote.capable('stream'):
2291 2295 return self.stream_in(remote, set(('revlogv1',)))
2292 2296 # otherwise, 'streamreqs' contains the remote revlog format
2293 2297 streamreqs = remote.capable('streamreqs')
2294 2298 if streamreqs:
2295 2299 streamreqs = set(streamreqs.split(','))
2296 2300 # if we support it, stream in and adjust our requirements
2297 2301 if not streamreqs - self.supportedformats:
2298 2302 return self.stream_in(remote, streamreqs)
2299 2303 return self.pull(remote, heads)
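
A hedged sketch of a streaming clone via the method above; the URL and
destination path are illustrative:

    from mercurial import ui, hg

    src = hg.repository(ui.ui(), 'http://example.com/hg')
    dest = hg.repository(ui.ui(), '/path/to/clone', create=True)
    dest.clone(src, stream=True)  # falls back to pull() when unsupported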
2300 2304
2301 2305 def pushkey(self, namespace, key, old, new):
2302 2306 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2303 2307 old=old, new=new)
2304 2308 ret = pushkey.push(self, namespace, key, old, new)
2305 2309 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2306 2310 ret=ret)
2307 2311 return ret
2308 2312
2309 2313 def listkeys(self, namespace):
2310 2314 self.hook('prelistkeys', throw=True, namespace=namespace)
2311 2315 values = pushkey.list(self, namespace)
2312 2316 self.hook('listkeys', namespace=namespace, values=values)
2313 2317 return values
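
A sketch mirroring how push() drives these wire functions to publish a
remote changeset; remote is a peer repository object as in the pull sketch
and the node value is a hypothetical changeset id:

    from mercurial import phases

    node = '0123456789abcdef0123456789abcdef01234567'  # hypothetical
    keys = remote.listkeys('phases')  # e.g. {'publishing': 'True'}
    ok = remote.pushkey('phases', node,
                        str(phases.draft), str(phases.public))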
2314 2318
2315 2319 def debugwireargs(self, one, two, three=None, four=None, five=None):
2316 2320 '''used to test argument passing over the wire'''
2317 2321 return "%s %s %s %s %s" % (one, two, three, four, five)
2318 2322
2319 2323 def savecommitmessage(self, text):
2320 2324 fp = self.opener('last-message.txt', 'wb')
2321 2325 try:
2322 2326 fp.write(text)
2323 2327 finally:
2324 2328 fp.close()
2325 2329 return self.pathto(fp.name[len(self.root)+1:])
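
A usage sketch: the message lands in .hg/last-message.txt and the
repo-relative path comes back (repo as in the earlier sketches):

    path = repo.savecommitmessage('WIP: refactor undo handling\n')
    repo.ui.status('commit message saved to %s\n' % path)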
2326 2330
2327 2331 # used to avoid circular references so destructors work
2328 2332 def aftertrans(files):
2329 2333 renamefiles = [tuple(t) for t in files]
2330 2334 def a():
2331 2335 for src, dest in renamefiles:
2332 2336 util.rename(src, dest)
2333 2337 return a
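
A sketch of why the closure matters: the rename list is copied eagerly, so
the transaction can hold the returned function without keeping a reference
back to the repository (file names are examples and must exist on disk):

    post = aftertrans([('journal.dirstate', 'undo.dirstate')])
    # later, once the transaction is closed:
    post()  # util.rename('journal.dirstate', 'undo.dirstate')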
2334 2338
2335 2339 def undoname(fn):
2336 2340 base, name = os.path.split(fn)
2337 2341 assert name.startswith('journal')
2338 2342 return os.path.join(base, name.replace('journal', 'undo', 1))
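
Worked examples of the mapping, assuming POSIX path separators:

    assert undoname('.hg/store/journal') == '.hg/store/undo'
    assert undoname('.hg/journal.branch') == '.hg/undo.branch'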
2339 2343
2340 2344 def instance(ui, path, create):
2341 2345 return localrepository(ui, util.urllocalpath(path), create)
2342 2346
2343 2347 def islocal(path):
2344 2348 return True