Add a phases.new-commit option to control minimal phase of new commit...
Pierre-Yves David
r15706:ebaefd8c default
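The new option is read with ui.configint (see the changed hunk in commitctx below), so it takes a numeric phase value: 0 = public, 1 = draft, 2 = secret. The default of 1 keeps the previous behaviour of committing new changesets as draft. A minimal hgrc sketch, assuming the numeric value semantics above (the choice of 2 is only illustrative):

    [phases]
    # commit new changesets in the secret phase rather than draft
    new-commit = 2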
@@ -1,2212 +1,2219 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39 self._dirtyphases = False
40 40
41 41 try:
42 42 self.ui.readconfig(self.join("hgrc"), self.root)
43 43 extensions.loadall(self.ui)
44 44 except IOError:
45 45 pass
46 46
47 47 if not os.path.isdir(self.path):
48 48 if create:
49 49 if not os.path.exists(path):
50 50 util.makedirs(path)
51 51 util.makedir(self.path, notindexed=True)
52 52 requirements = ["revlogv1"]
53 53 if self.ui.configbool('format', 'usestore', True):
54 54 os.mkdir(os.path.join(self.path, "store"))
55 55 requirements.append("store")
56 56 if self.ui.configbool('format', 'usefncache', True):
57 57 requirements.append("fncache")
58 58 if self.ui.configbool('format', 'dotencode', True):
59 59 requirements.append('dotencode')
60 60 # create an invalid changelog
61 61 self.opener.append(
62 62 "00changelog.i",
63 63 '\0\0\0\2' # represents revlogv2
64 64 ' dummy changelog to prevent using the old repo layout'
65 65 )
66 66 if self.ui.configbool('format', 'generaldelta', False):
67 67 requirements.append("generaldelta")
68 68 requirements = set(requirements)
69 69 else:
70 70 raise error.RepoError(_("repository %s not found") % path)
71 71 elif create:
72 72 raise error.RepoError(_("repository %s already exists") % path)
73 73 else:
74 74 try:
75 75 requirements = scmutil.readrequires(self.opener, self.supported)
76 76 except IOError, inst:
77 77 if inst.errno != errno.ENOENT:
78 78 raise
79 79 requirements = set()
80 80
81 81 self.sharedpath = self.path
82 82 try:
83 83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
84 84 if not os.path.exists(s):
85 85 raise error.RepoError(
86 86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 87 self.sharedpath = s
88 88 except IOError, inst:
89 89 if inst.errno != errno.ENOENT:
90 90 raise
91 91
92 92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 93 self.spath = self.store.path
94 94 self.sopener = self.store.opener
95 95 self.sjoin = self.store.join
96 96 self.opener.createmode = self.store.createmode
97 97 self._applyrequirements(requirements)
98 98 if create:
99 99 self._writerequirements()
100 100
101 101
102 102 self._branchcache = None
103 103 self._branchcachetip = None
104 104 self.filterpats = {}
105 105 self._datafilters = {}
106 106 self._transref = self._lockref = self._wlockref = None
107 107
108 108 # A cache for various files under .hg/ that tracks file changes,
109 109 # (used by the filecache decorator)
110 110 #
111 111 # Maps a property name to its util.filecacheentry
112 112 self._filecache = {}
113 113
114 114 def _applyrequirements(self, requirements):
115 115 self.requirements = requirements
116 116 openerreqs = set(('revlogv1', 'generaldelta'))
117 117 self.sopener.options = dict((r, 1) for r in requirements
118 118 if r in openerreqs)
119 119
120 120 def _writerequirements(self):
121 121 reqfile = self.opener("requires", "w")
122 122 for r in self.requirements:
123 123 reqfile.write("%s\n" % r)
124 124 reqfile.close()
125 125
126 126 def _checknested(self, path):
127 127 """Determine if path is a legal nested repository."""
128 128 if not path.startswith(self.root):
129 129 return False
130 130 subpath = path[len(self.root) + 1:]
131 131
132 132 # XXX: Checking against the current working copy is wrong in
133 133 # the sense that it can reject things like
134 134 #
135 135 # $ hg cat -r 10 sub/x.txt
136 136 #
137 137 # if sub/ is no longer a subrepository in the working copy
138 138 # parent revision.
139 139 #
140 140 # However, it can of course also allow things that would have
141 141 # been rejected before, such as the above cat command if sub/
142 142 # is a subrepository now, but was a normal directory before.
143 143 # The old path auditor would have rejected by mistake since it
144 144 # panics when it sees sub/.hg/.
145 145 #
146 146 # All in all, checking against the working copy seems sensible
147 147 # since we want to prevent access to nested repositories on
148 148 # the filesystem *now*.
149 149 ctx = self[None]
150 150 parts = util.splitpath(subpath)
151 151 while parts:
152 152 prefix = os.sep.join(parts)
153 153 if prefix in ctx.substate:
154 154 if prefix == subpath:
155 155 return True
156 156 else:
157 157 sub = ctx.sub(prefix)
158 158 return sub.checknested(subpath[len(prefix) + 1:])
159 159 else:
160 160 parts.pop()
161 161 return False
162 162
163 163 @filecache('bookmarks')
164 164 def _bookmarks(self):
165 165 return bookmarks.read(self)
166 166
167 167 @filecache('bookmarks.current')
168 168 def _bookmarkcurrent(self):
169 169 return bookmarks.readcurrent(self)
170 170
171 171 def _writebookmarks(self, marks):
172 172 bookmarks.write(self)
173 173
174 174 @filecache('phaseroots')
175 175 def _phaseroots(self):
176 176 self._dirtyphases = False
177 177 phaseroots = phases.readroots(self)
178 178 phases.filterunknown(self, phaseroots)
179 179 return phaseroots
180 180
181 181 @propertycache
182 182 def _phaserev(self):
183 183 cache = [0] * len(self)
184 184 for phase in phases.trackedphases:
185 185 roots = map(self.changelog.rev, self._phaseroots[phase])
186 186 if roots:
187 187 for rev in roots:
188 188 cache[rev] = phase
189 189 for rev in self.changelog.descendants(*roots):
190 190 cache[rev] = phase
191 191 return cache
192 192
193 193 @filecache('00changelog.i', True)
194 194 def changelog(self):
195 195 c = changelog.changelog(self.sopener)
196 196 if 'HG_PENDING' in os.environ:
197 197 p = os.environ['HG_PENDING']
198 198 if p.startswith(self.root):
199 199 c.readpending('00changelog.i.a')
200 200 return c
201 201
202 202 @filecache('00manifest.i', True)
203 203 def manifest(self):
204 204 return manifest.manifest(self.sopener)
205 205
206 206 @filecache('dirstate')
207 207 def dirstate(self):
208 208 warned = [0]
209 209 def validate(node):
210 210 try:
211 211 self.changelog.rev(node)
212 212 return node
213 213 except error.LookupError:
214 214 if not warned[0]:
215 215 warned[0] = True
216 216 self.ui.warn(_("warning: ignoring unknown"
217 217 " working parent %s!\n") % short(node))
218 218 return nullid
219 219
220 220 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
221 221
222 222 def __getitem__(self, changeid):
223 223 if changeid is None:
224 224 return context.workingctx(self)
225 225 return context.changectx(self, changeid)
226 226
227 227 def __contains__(self, changeid):
228 228 try:
229 229 return bool(self.lookup(changeid))
230 230 except error.RepoLookupError:
231 231 return False
232 232
233 233 def __nonzero__(self):
234 234 return True
235 235
236 236 def __len__(self):
237 237 return len(self.changelog)
238 238
239 239 def __iter__(self):
240 240 for i in xrange(len(self)):
241 241 yield i
242 242
243 243 def revs(self, expr, *args):
244 244 '''Return a list of revisions matching the given revset'''
245 245 expr = revset.formatspec(expr, *args)
246 246 m = revset.match(None, expr)
247 247 return [r for r in m(self, range(len(self)))]
248 248
249 249 def set(self, expr, *args):
250 250 '''
251 251 Yield a context for each matching revision, after doing arg
252 252 replacement via revset.formatspec
253 253 '''
254 254 for r in self.revs(expr, *args):
255 255 yield self[r]
256 256
257 257 def url(self):
258 258 return 'file:' + self.root
259 259
260 260 def hook(self, name, throw=False, **args):
261 261 return hook.hook(self.ui, self, name, throw, **args)
262 262
263 263 tag_disallowed = ':\r\n'
264 264
265 265 def _tag(self, names, node, message, local, user, date, extra={}):
266 266 if isinstance(names, str):
267 267 allchars = names
268 268 names = (names,)
269 269 else:
270 270 allchars = ''.join(names)
271 271 for c in self.tag_disallowed:
272 272 if c in allchars:
273 273 raise util.Abort(_('%r cannot be used in a tag name') % c)
274 274
275 275 branches = self.branchmap()
276 276 for name in names:
277 277 self.hook('pretag', throw=True, node=hex(node), tag=name,
278 278 local=local)
279 279 if name in branches:
280 280 self.ui.warn(_("warning: tag %s conflicts with existing"
281 281 " branch name\n") % name)
282 282
283 283 def writetags(fp, names, munge, prevtags):
284 284 fp.seek(0, 2)
285 285 if prevtags and prevtags[-1] != '\n':
286 286 fp.write('\n')
287 287 for name in names:
288 288 m = munge and munge(name) or name
289 289 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
290 290 old = self.tags().get(name, nullid)
291 291 fp.write('%s %s\n' % (hex(old), m))
292 292 fp.write('%s %s\n' % (hex(node), m))
293 293 fp.close()
294 294
295 295 prevtags = ''
296 296 if local:
297 297 try:
298 298 fp = self.opener('localtags', 'r+')
299 299 except IOError:
300 300 fp = self.opener('localtags', 'a')
301 301 else:
302 302 prevtags = fp.read()
303 303
304 304 # local tags are stored in the current charset
305 305 writetags(fp, names, None, prevtags)
306 306 for name in names:
307 307 self.hook('tag', node=hex(node), tag=name, local=local)
308 308 return
309 309
310 310 try:
311 311 fp = self.wfile('.hgtags', 'rb+')
312 312 except IOError, e:
313 313 if e.errno != errno.ENOENT:
314 314 raise
315 315 fp = self.wfile('.hgtags', 'ab')
316 316 else:
317 317 prevtags = fp.read()
318 318
319 319 # committed tags are stored in UTF-8
320 320 writetags(fp, names, encoding.fromlocal, prevtags)
321 321
322 322 fp.close()
323 323
324 324 if '.hgtags' not in self.dirstate:
325 325 self[None].add(['.hgtags'])
326 326
327 327 m = matchmod.exact(self.root, '', ['.hgtags'])
328 328 tagnode = self.commit(message, user, date, extra=extra, match=m)
329 329
330 330 for name in names:
331 331 self.hook('tag', node=hex(node), tag=name, local=local)
332 332
333 333 return tagnode
334 334
335 335 def tag(self, names, node, message, local, user, date):
336 336 '''tag a revision with one or more symbolic names.
337 337
338 338 names is a list of strings or, when adding a single tag, names may be a
339 339 string.
340 340
341 341 if local is True, the tags are stored in a per-repository file.
342 342 otherwise, they are stored in the .hgtags file, and a new
343 343 changeset is committed with the change.
344 344
345 345 keyword arguments:
346 346
347 347 local: whether to store tags in non-version-controlled file
348 348 (default False)
349 349
350 350 message: commit message to use if committing
351 351
352 352 user: name of user to use if committing
353 353
354 354 date: date tuple to use if committing'''
355 355
356 356 if not local:
357 357 for x in self.status()[:5]:
358 358 if '.hgtags' in x:
359 359 raise util.Abort(_('working copy of .hgtags is changed '
360 360 '(please commit .hgtags manually)'))
361 361
362 362 self.tags() # instantiate the cache
363 363 self._tag(names, node, message, local, user, date)
364 364
365 365 @propertycache
366 366 def _tagscache(self):
367 367 '''Returns a tagscache object that contains various tags-related caches.'''
368 368
369 369 # This simplifies its cache management by having one decorated
370 370 # function (this one) and the rest simply fetch things from it.
371 371 class tagscache(object):
372 372 def __init__(self):
373 373 # These two define the set of tags for this repository. tags
374 374 # maps tag name to node; tagtypes maps tag name to 'global' or
375 375 # 'local'. (Global tags are defined by .hgtags across all
376 376 # heads, and local tags are defined in .hg/localtags.)
377 377 # They constitute the in-memory cache of tags.
378 378 self.tags = self.tagtypes = None
379 379
380 380 self.nodetagscache = self.tagslist = None
381 381
382 382 cache = tagscache()
383 383 cache.tags, cache.tagtypes = self._findtags()
384 384
385 385 return cache
386 386
387 387 def tags(self):
388 388 '''return a mapping of tag to node'''
389 389 return self._tagscache.tags
390 390
391 391 def _findtags(self):
392 392 '''Do the hard work of finding tags. Return a pair of dicts
393 393 (tags, tagtypes) where tags maps tag name to node, and tagtypes
394 394 maps tag name to a string like \'global\' or \'local\'.
395 395 Subclasses or extensions are free to add their own tags, but
396 396 should be aware that the returned dicts will be retained for the
397 397 duration of the localrepo object.'''
398 398
399 399 # XXX what tagtype should subclasses/extensions use? Currently
400 400 # mq and bookmarks add tags, but do not set the tagtype at all.
401 401 # Should each extension invent its own tag type? Should there
402 402 # be one tagtype for all such "virtual" tags? Or is the status
403 403 # quo fine?
404 404
405 405 alltags = {} # map tag name to (node, hist)
406 406 tagtypes = {}
407 407
408 408 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
409 409 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
410 410
411 411 # Build the return dicts. Have to re-encode tag names because
412 412 # the tags module always uses UTF-8 (in order not to lose info
413 413 # writing to the cache), but the rest of Mercurial wants them in
414 414 # local encoding.
415 415 tags = {}
416 416 for (name, (node, hist)) in alltags.iteritems():
417 417 if node != nullid:
418 418 try:
419 419 # ignore tags to unknown nodes
420 420 self.changelog.lookup(node)
421 421 tags[encoding.tolocal(name)] = node
422 422 except error.LookupError:
423 423 pass
424 424 tags['tip'] = self.changelog.tip()
425 425 tagtypes = dict([(encoding.tolocal(name), value)
426 426 for (name, value) in tagtypes.iteritems()])
427 427 return (tags, tagtypes)
428 428
429 429 def tagtype(self, tagname):
430 430 '''
431 431 return the type of the given tag. result can be:
432 432
433 433 'local' : a local tag
434 434 'global' : a global tag
435 435 None : tag does not exist
436 436 '''
437 437
438 438 return self._tagscache.tagtypes.get(tagname)
439 439
440 440 def tagslist(self):
441 441 '''return a list of tags ordered by revision'''
442 442 if not self._tagscache.tagslist:
443 443 l = []
444 444 for t, n in self.tags().iteritems():
445 445 r = self.changelog.rev(n)
446 446 l.append((r, t, n))
447 447 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
448 448
449 449 return self._tagscache.tagslist
450 450
451 451 def nodetags(self, node):
452 452 '''return the tags associated with a node'''
453 453 if not self._tagscache.nodetagscache:
454 454 nodetagscache = {}
455 455 for t, n in self.tags().iteritems():
456 456 nodetagscache.setdefault(n, []).append(t)
457 457 for tags in nodetagscache.itervalues():
458 458 tags.sort()
459 459 self._tagscache.nodetagscache = nodetagscache
460 460 return self._tagscache.nodetagscache.get(node, [])
461 461
462 462 def nodebookmarks(self, node):
463 463 marks = []
464 464 for bookmark, n in self._bookmarks.iteritems():
465 465 if n == node:
466 466 marks.append(bookmark)
467 467 return sorted(marks)
468 468
469 469 def _branchtags(self, partial, lrev):
470 470 # TODO: rename this function?
471 471 tiprev = len(self) - 1
472 472 if lrev != tiprev:
473 473 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
474 474 self._updatebranchcache(partial, ctxgen)
475 475 self._writebranchcache(partial, self.changelog.tip(), tiprev)
476 476
477 477 return partial
478 478
479 479 def updatebranchcache(self):
480 480 tip = self.changelog.tip()
481 481 if self._branchcache is not None and self._branchcachetip == tip:
482 482 return self._branchcache
483 483
484 484 oldtip = self._branchcachetip
485 485 self._branchcachetip = tip
486 486 if oldtip is None or oldtip not in self.changelog.nodemap:
487 487 partial, last, lrev = self._readbranchcache()
488 488 else:
489 489 lrev = self.changelog.rev(oldtip)
490 490 partial = self._branchcache
491 491
492 492 self._branchtags(partial, lrev)
493 493 # this private cache holds all heads (not just tips)
494 494 self._branchcache = partial
495 495
496 496 def branchmap(self):
497 497 '''returns a dictionary {branch: [branchheads]}'''
498 498 self.updatebranchcache()
499 499 return self._branchcache
500 500
501 501 def branchtags(self):
502 502 '''return a dict where branch names map to the tipmost head of
503 503 the branch, open heads come before closed'''
504 504 bt = {}
505 505 for bn, heads in self.branchmap().iteritems():
506 506 tip = heads[-1]
507 507 for h in reversed(heads):
508 508 if 'close' not in self.changelog.read(h)[5]:
509 509 tip = h
510 510 break
511 511 bt[bn] = tip
512 512 return bt
513 513
514 514 def _readbranchcache(self):
515 515 partial = {}
516 516 try:
517 517 f = self.opener("cache/branchheads")
518 518 lines = f.read().split('\n')
519 519 f.close()
520 520 except (IOError, OSError):
521 521 return {}, nullid, nullrev
522 522
523 523 try:
524 524 last, lrev = lines.pop(0).split(" ", 1)
525 525 last, lrev = bin(last), int(lrev)
526 526 if lrev >= len(self) or self[lrev].node() != last:
527 527 # invalidate the cache
528 528 raise ValueError('invalidating branch cache (tip differs)')
529 529 for l in lines:
530 530 if not l:
531 531 continue
532 532 node, label = l.split(" ", 1)
533 533 label = encoding.tolocal(label.strip())
534 534 partial.setdefault(label, []).append(bin(node))
535 535 except KeyboardInterrupt:
536 536 raise
537 537 except Exception, inst:
538 538 if self.ui.debugflag:
539 539 self.ui.warn(str(inst), '\n')
540 540 partial, last, lrev = {}, nullid, nullrev
541 541 return partial, last, lrev
542 542
543 543 def _writebranchcache(self, branches, tip, tiprev):
544 544 try:
545 545 f = self.opener("cache/branchheads", "w", atomictemp=True)
546 546 f.write("%s %s\n" % (hex(tip), tiprev))
547 547 for label, nodes in branches.iteritems():
548 548 for node in nodes:
549 549 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
550 550 f.close()
551 551 except (IOError, OSError):
552 552 pass
553 553
554 554 def _updatebranchcache(self, partial, ctxgen):
555 555 # collect new branch entries
556 556 newbranches = {}
557 557 for c in ctxgen:
558 558 newbranches.setdefault(c.branch(), []).append(c.node())
559 559 # if older branchheads are reachable from new ones, they aren't
560 560 # really branchheads. Note checking parents is insufficient:
561 561 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
562 562 for branch, newnodes in newbranches.iteritems():
563 563 bheads = partial.setdefault(branch, [])
564 564 bheads.extend(newnodes)
565 565 if len(bheads) <= 1:
566 566 continue
567 567 bheads = sorted(bheads, key=lambda x: self[x].rev())
568 568 # starting from tip means fewer passes over reachable
569 569 while newnodes:
570 570 latest = newnodes.pop()
571 571 if latest not in bheads:
572 572 continue
573 573 minbhrev = self[bheads[0]].node()
574 574 reachable = self.changelog.reachable(latest, minbhrev)
575 575 reachable.remove(latest)
576 576 if reachable:
577 577 bheads = [b for b in bheads if b not in reachable]
578 578 partial[branch] = bheads
579 579
580 580 def lookup(self, key):
581 581 if isinstance(key, int):
582 582 return self.changelog.node(key)
583 583 elif key == '.':
584 584 return self.dirstate.p1()
585 585 elif key == 'null':
586 586 return nullid
587 587 elif key == 'tip':
588 588 return self.changelog.tip()
589 589 n = self.changelog._match(key)
590 590 if n:
591 591 return n
592 592 if key in self._bookmarks:
593 593 return self._bookmarks[key]
594 594 if key in self.tags():
595 595 return self.tags()[key]
596 596 if key in self.branchtags():
597 597 return self.branchtags()[key]
598 598 n = self.changelog._partialmatch(key)
599 599 if n:
600 600 return n
601 601
602 602 # can't find key, check if it might have come from damaged dirstate
603 603 if key in self.dirstate.parents():
604 604 raise error.Abort(_("working directory has unknown parent '%s'!")
605 605 % short(key))
606 606 try:
607 607 if len(key) == 20:
608 608 key = hex(key)
609 609 except TypeError:
610 610 pass
611 611 raise error.RepoLookupError(_("unknown revision '%s'") % key)
612 612
613 613 def lookupbranch(self, key, remote=None):
614 614 repo = remote or self
615 615 if key in repo.branchmap():
616 616 return key
617 617
618 618 repo = (remote and remote.local()) and remote or self
619 619 return repo[key].branch()
620 620
621 621 def known(self, nodes):
622 622 nm = self.changelog.nodemap
623 623 return [(n in nm) for n in nodes]
624 624
625 625 def local(self):
626 626 return self
627 627
628 628 def join(self, f):
629 629 return os.path.join(self.path, f)
630 630
631 631 def wjoin(self, f):
632 632 return os.path.join(self.root, f)
633 633
634 634 def file(self, f):
635 635 if f[0] == '/':
636 636 f = f[1:]
637 637 return filelog.filelog(self.sopener, f)
638 638
639 639 def changectx(self, changeid):
640 640 return self[changeid]
641 641
642 642 def parents(self, changeid=None):
643 643 '''get list of changectxs for parents of changeid'''
644 644 return self[changeid].parents()
645 645
646 646 def filectx(self, path, changeid=None, fileid=None):
647 647 """changeid can be a changeset revision, node, or tag.
648 648 fileid can be a file revision or node."""
649 649 return context.filectx(self, path, changeid, fileid)
650 650
651 651 def getcwd(self):
652 652 return self.dirstate.getcwd()
653 653
654 654 def pathto(self, f, cwd=None):
655 655 return self.dirstate.pathto(f, cwd)
656 656
657 657 def wfile(self, f, mode='r'):
658 658 return self.wopener(f, mode)
659 659
660 660 def _link(self, f):
661 661 return os.path.islink(self.wjoin(f))
662 662
663 663 def _loadfilter(self, filter):
664 664 if filter not in self.filterpats:
665 665 l = []
666 666 for pat, cmd in self.ui.configitems(filter):
667 667 if cmd == '!':
668 668 continue
669 669 mf = matchmod.match(self.root, '', [pat])
670 670 fn = None
671 671 params = cmd
672 672 for name, filterfn in self._datafilters.iteritems():
673 673 if cmd.startswith(name):
674 674 fn = filterfn
675 675 params = cmd[len(name):].lstrip()
676 676 break
677 677 if not fn:
678 678 fn = lambda s, c, **kwargs: util.filter(s, c)
679 679 # Wrap old filters not supporting keyword arguments
680 680 if not inspect.getargspec(fn)[2]:
681 681 oldfn = fn
682 682 fn = lambda s, c, **kwargs: oldfn(s, c)
683 683 l.append((mf, fn, params))
684 684 self.filterpats[filter] = l
685 685 return self.filterpats[filter]
686 686
687 687 def _filter(self, filterpats, filename, data):
688 688 for mf, fn, cmd in filterpats:
689 689 if mf(filename):
690 690 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
691 691 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
692 692 break
693 693
694 694 return data
695 695
696 696 @propertycache
697 697 def _encodefilterpats(self):
698 698 return self._loadfilter('encode')
699 699
700 700 @propertycache
701 701 def _decodefilterpats(self):
702 702 return self._loadfilter('decode')
703 703
704 704 def adddatafilter(self, name, filter):
705 705 self._datafilters[name] = filter
706 706
707 707 def wread(self, filename):
708 708 if self._link(filename):
709 709 data = os.readlink(self.wjoin(filename))
710 710 else:
711 711 data = self.wopener.read(filename)
712 712 return self._filter(self._encodefilterpats, filename, data)
713 713
714 714 def wwrite(self, filename, data, flags):
715 715 data = self._filter(self._decodefilterpats, filename, data)
716 716 if 'l' in flags:
717 717 self.wopener.symlink(data, filename)
718 718 else:
719 719 self.wopener.write(filename, data)
720 720 if 'x' in flags:
721 721 util.setflags(self.wjoin(filename), False, True)
722 722
723 723 def wwritedata(self, filename, data):
724 724 return self._filter(self._decodefilterpats, filename, data)
725 725
726 726 def transaction(self, desc):
727 727 tr = self._transref and self._transref() or None
728 728 if tr and tr.running():
729 729 return tr.nest()
730 730
731 731 # abort here if the journal already exists
732 732 if os.path.exists(self.sjoin("journal")):
733 733 raise error.RepoError(
734 734 _("abandoned transaction found - run hg recover"))
735 735
736 736 journalfiles = self._writejournal(desc)
737 737 renames = [(x, undoname(x)) for x in journalfiles]
738 738
739 739 tr = transaction.transaction(self.ui.warn, self.sopener,
740 740 self.sjoin("journal"),
741 741 aftertrans(renames),
742 742 self.store.createmode)
743 743 self._transref = weakref.ref(tr)
744 744 return tr
745 745
746 746 def _writejournal(self, desc):
747 747 # save dirstate for rollback
748 748 try:
749 749 ds = self.opener.read("dirstate")
750 750 except IOError:
751 751 ds = ""
752 752 self.opener.write("journal.dirstate", ds)
753 753 self.opener.write("journal.branch",
754 754 encoding.fromlocal(self.dirstate.branch()))
755 755 self.opener.write("journal.desc",
756 756 "%d\n%s\n" % (len(self), desc))
757 757
758 758 bkname = self.join('bookmarks')
759 759 if os.path.exists(bkname):
760 760 util.copyfile(bkname, self.join('journal.bookmarks'))
761 761 else:
762 762 self.opener.write('journal.bookmarks', '')
763 763 phasesname = self.sjoin('phaseroots')
764 764 if os.path.exists(phasesname):
765 765 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
766 766 else:
767 767 self.sopener.write('journal.phaseroots', '')
768 768
769 769 return (self.sjoin('journal'), self.join('journal.dirstate'),
770 770 self.join('journal.branch'), self.join('journal.desc'),
771 771 self.join('journal.bookmarks'),
772 772 self.sjoin('journal.phaseroots'))
773 773
774 774 def recover(self):
775 775 lock = self.lock()
776 776 try:
777 777 if os.path.exists(self.sjoin("journal")):
778 778 self.ui.status(_("rolling back interrupted transaction\n"))
779 779 transaction.rollback(self.sopener, self.sjoin("journal"),
780 780 self.ui.warn)
781 781 self.invalidate()
782 782 return True
783 783 else:
784 784 self.ui.warn(_("no interrupted transaction available\n"))
785 785 return False
786 786 finally:
787 787 lock.release()
788 788
789 789 def rollback(self, dryrun=False, force=False):
790 790 wlock = lock = None
791 791 try:
792 792 wlock = self.wlock()
793 793 lock = self.lock()
794 794 if os.path.exists(self.sjoin("undo")):
795 795 return self._rollback(dryrun, force)
796 796 else:
797 797 self.ui.warn(_("no rollback information available\n"))
798 798 return 1
799 799 finally:
800 800 release(lock, wlock)
801 801
802 802 def _rollback(self, dryrun, force):
803 803 ui = self.ui
804 804 try:
805 805 args = self.opener.read('undo.desc').splitlines()
806 806 (oldlen, desc, detail) = (int(args[0]), args[1], None)
807 807 if len(args) >= 3:
808 808 detail = args[2]
809 809 oldtip = oldlen - 1
810 810
811 811 if detail and ui.verbose:
812 812 msg = (_('repository tip rolled back to revision %s'
813 813 ' (undo %s: %s)\n')
814 814 % (oldtip, desc, detail))
815 815 else:
816 816 msg = (_('repository tip rolled back to revision %s'
817 817 ' (undo %s)\n')
818 818 % (oldtip, desc))
819 819 except IOError:
820 820 msg = _('rolling back unknown transaction\n')
821 821 desc = None
822 822
823 823 if not force and self['.'] != self['tip'] and desc == 'commit':
824 824 raise util.Abort(
825 825 _('rollback of last commit while not checked out '
826 826 'may lose data'), hint=_('use -f to force'))
827 827
828 828 ui.status(msg)
829 829 if dryrun:
830 830 return 0
831 831
832 832 parents = self.dirstate.parents()
833 833 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
834 834 if os.path.exists(self.join('undo.bookmarks')):
835 835 util.rename(self.join('undo.bookmarks'),
836 836 self.join('bookmarks'))
837 837 if os.path.exists(self.sjoin('undo.phaseroots')):
838 838 util.rename(self.sjoin('undo.phaseroots'),
839 839 self.sjoin('phaseroots'))
840 840 self.invalidate()
841 841
842 842 parentgone = (parents[0] not in self.changelog.nodemap or
843 843 parents[1] not in self.changelog.nodemap)
844 844 if parentgone:
845 845 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
846 846 try:
847 847 branch = self.opener.read('undo.branch')
848 848 self.dirstate.setbranch(branch)
849 849 except IOError:
850 850 ui.warn(_('named branch could not be reset: '
851 851 'current branch is still \'%s\'\n')
852 852 % self.dirstate.branch())
853 853
854 854 self.dirstate.invalidate()
855 855 parents = tuple([p.rev() for p in self.parents()])
856 856 if len(parents) > 1:
857 857 ui.status(_('working directory now based on '
858 858 'revisions %d and %d\n') % parents)
859 859 else:
860 860 ui.status(_('working directory now based on '
861 861 'revision %d\n') % parents)
862 862 self.destroyed()
863 863 return 0
864 864
865 865 def invalidatecaches(self):
866 866 try:
867 867 delattr(self, '_tagscache')
868 868 except AttributeError:
869 869 pass
870 870
871 871 self._branchcache = None # in UTF-8
872 872 self._branchcachetip = None
873 873
874 874 def invalidatedirstate(self):
875 875 '''Invalidates the dirstate, causing the next call to dirstate
876 876 to check if it was modified since the last time it was read,
877 877 rereading it if it has.
878 878
879 879 This is different from dirstate.invalidate() in that it doesn't
880 880 always reread the dirstate. Use dirstate.invalidate() if you want to
881 881 explicitly read the dirstate again (i.e. restoring it to a previous
882 882 known good state).'''
883 883 try:
884 884 delattr(self, 'dirstate')
885 885 except AttributeError:
886 886 pass
887 887
888 888 def invalidate(self):
889 889 for k in self._filecache:
890 890 # dirstate is invalidated separately in invalidatedirstate()
891 891 if k == 'dirstate':
892 892 continue
893 893
894 894 try:
895 895 delattr(self, k)
896 896 except AttributeError:
897 897 pass
898 898 self.invalidatecaches()
899 899
900 900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
901 901 try:
902 902 l = lock.lock(lockname, 0, releasefn, desc=desc)
903 903 except error.LockHeld, inst:
904 904 if not wait:
905 905 raise
906 906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
907 907 (desc, inst.locker))
908 908 # default to 600 seconds timeout
909 909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
910 910 releasefn, desc=desc)
911 911 if acquirefn:
912 912 acquirefn()
913 913 return l
914 914
915 915 def _afterlock(self, callback):
916 916 """add a callback to the current repository lock.
917 917
918 918 The callback will be executed on lock release."""
919 919 l = self._lockref and self._lockref()
920 920 if l:
921 921 l.postrelease.append(callback)
922 922
923 923 def lock(self, wait=True):
924 924 '''Lock the repository store (.hg/store) and return a weak reference
925 925 to the lock. Use this before modifying the store (e.g. committing or
926 926 stripping). If you are opening a transaction, get a lock as well.'''
927 927 l = self._lockref and self._lockref()
928 928 if l is not None and l.held:
929 929 l.lock()
930 930 return l
931 931
932 932 def unlock():
933 933 self.store.write()
934 934 if self._dirtyphases:
935 935 phases.writeroots(self)
936 936 for k, ce in self._filecache.items():
937 937 if k == 'dirstate':
938 938 continue
939 939 ce.refresh()
940 940
941 941 l = self._lock(self.sjoin("lock"), wait, unlock,
942 942 self.invalidate, _('repository %s') % self.origroot)
943 943 self._lockref = weakref.ref(l)
944 944 return l
945 945
946 946 def wlock(self, wait=True):
947 947 '''Lock the non-store parts of the repository (everything under
948 948 .hg except .hg/store) and return a weak reference to the lock.
949 949 Use this before modifying files in .hg.'''
950 950 l = self._wlockref and self._wlockref()
951 951 if l is not None and l.held:
952 952 l.lock()
953 953 return l
954 954
955 955 def unlock():
956 956 self.dirstate.write()
957 957 ce = self._filecache.get('dirstate')
958 958 if ce:
959 959 ce.refresh()
960 960
961 961 l = self._lock(self.join("wlock"), wait, unlock,
962 962 self.invalidatedirstate, _('working directory of %s') %
963 963 self.origroot)
964 964 self._wlockref = weakref.ref(l)
965 965 return l
966 966
967 967 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
968 968 """
969 969 commit an individual file as part of a larger transaction
970 970 """
971 971
972 972 fname = fctx.path()
973 973 text = fctx.data()
974 974 flog = self.file(fname)
975 975 fparent1 = manifest1.get(fname, nullid)
976 976 fparent2 = fparent2o = manifest2.get(fname, nullid)
977 977
978 978 meta = {}
979 979 copy = fctx.renamed()
980 980 if copy and copy[0] != fname:
981 981 # Mark the new revision of this file as a copy of another
982 982 # file. This copy data will effectively act as a parent
983 983 # of this new revision. If this is a merge, the first
984 984 # parent will be the nullid (meaning "look up the copy data")
985 985 # and the second one will be the other parent. For example:
986 986 #
987 987 # 0 --- 1 --- 3 rev1 changes file foo
988 988 # \ / rev2 renames foo to bar and changes it
989 989 # \- 2 -/ rev3 should have bar with all changes and
990 990 # should record that bar descends from
991 991 # bar in rev2 and foo in rev1
992 992 #
993 993 # this allows this merge to succeed:
994 994 #
995 995 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
996 996 # \ / merging rev3 and rev4 should use bar@rev2
997 997 # \- 2 --- 4 as the merge base
998 998 #
999 999
1000 1000 cfname = copy[0]
1001 1001 crev = manifest1.get(cfname)
1002 1002 newfparent = fparent2
1003 1003
1004 1004 if manifest2: # branch merge
1005 1005 if fparent2 == nullid or crev is None: # copied on remote side
1006 1006 if cfname in manifest2:
1007 1007 crev = manifest2[cfname]
1008 1008 newfparent = fparent1
1009 1009
1010 1010 # find source in nearest ancestor if we've lost track
1011 1011 if not crev:
1012 1012 self.ui.debug(" %s: searching for copy revision for %s\n" %
1013 1013 (fname, cfname))
1014 1014 for ancestor in self[None].ancestors():
1015 1015 if cfname in ancestor:
1016 1016 crev = ancestor[cfname].filenode()
1017 1017 break
1018 1018
1019 1019 if crev:
1020 1020 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1021 1021 meta["copy"] = cfname
1022 1022 meta["copyrev"] = hex(crev)
1023 1023 fparent1, fparent2 = nullid, newfparent
1024 1024 else:
1025 1025 self.ui.warn(_("warning: can't find ancestor for '%s' "
1026 1026 "copied from '%s'!\n") % (fname, cfname))
1027 1027
1028 1028 elif fparent2 != nullid:
1029 1029 # is one parent an ancestor of the other?
1030 1030 fparentancestor = flog.ancestor(fparent1, fparent2)
1031 1031 if fparentancestor == fparent1:
1032 1032 fparent1, fparent2 = fparent2, nullid
1033 1033 elif fparentancestor == fparent2:
1034 1034 fparent2 = nullid
1035 1035
1036 1036 # is the file changed?
1037 1037 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1038 1038 changelist.append(fname)
1039 1039 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1040 1040
1041 1041 # are just the flags changed during merge?
1042 1042 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1043 1043 changelist.append(fname)
1044 1044
1045 1045 return fparent1
1046 1046
1047 1047 def commit(self, text="", user=None, date=None, match=None, force=False,
1048 1048 editor=False, extra={}):
1049 1049 """Add a new revision to current repository.
1050 1050
1051 1051 Revision information is gathered from the working directory,
1052 1052 match can be used to filter the committed files. If editor is
1053 1053 supplied, it is called to get a commit message.
1054 1054 """
1055 1055
1056 1056 def fail(f, msg):
1057 1057 raise util.Abort('%s: %s' % (f, msg))
1058 1058
1059 1059 if not match:
1060 1060 match = matchmod.always(self.root, '')
1061 1061
1062 1062 if not force:
1063 1063 vdirs = []
1064 1064 match.dir = vdirs.append
1065 1065 match.bad = fail
1066 1066
1067 1067 wlock = self.wlock()
1068 1068 try:
1069 1069 wctx = self[None]
1070 1070 merge = len(wctx.parents()) > 1
1071 1071
1072 1072 if (not force and merge and match and
1073 1073 (match.files() or match.anypats())):
1074 1074 raise util.Abort(_('cannot partially commit a merge '
1075 1075 '(do not specify files or patterns)'))
1076 1076
1077 1077 changes = self.status(match=match, clean=force)
1078 1078 if force:
1079 1079 changes[0].extend(changes[6]) # mq may commit unchanged files
1080 1080
1081 1081 # check subrepos
1082 1082 subs = []
1083 1083 removedsubs = set()
1084 1084 if '.hgsub' in wctx:
1085 1085 # only manage subrepos and .hgsubstate if .hgsub is present
1086 1086 for p in wctx.parents():
1087 1087 removedsubs.update(s for s in p.substate if match(s))
1088 1088 for s in wctx.substate:
1089 1089 removedsubs.discard(s)
1090 1090 if match(s) and wctx.sub(s).dirty():
1091 1091 subs.append(s)
1092 1092 if (subs or removedsubs):
1093 1093 if (not match('.hgsub') and
1094 1094 '.hgsub' in (wctx.modified() + wctx.added())):
1095 1095 raise util.Abort(
1096 1096 _("can't commit subrepos without .hgsub"))
1097 1097 if '.hgsubstate' not in changes[0]:
1098 1098 changes[0].insert(0, '.hgsubstate')
1099 1099 if '.hgsubstate' in changes[2]:
1100 1100 changes[2].remove('.hgsubstate')
1101 1101 elif '.hgsub' in changes[2]:
1102 1102 # clean up .hgsubstate when .hgsub is removed
1103 1103 if ('.hgsubstate' in wctx and
1104 1104 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1105 1105 changes[2].insert(0, '.hgsubstate')
1106 1106
1107 1107 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1108 1108 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1109 1109 if changedsubs:
1110 1110 raise util.Abort(_("uncommitted changes in subrepo %s")
1111 1111 % changedsubs[0],
1112 1112 hint=_("use --subrepos for recursive commit"))
1113 1113
1114 1114 # make sure all explicit patterns are matched
1115 1115 if not force and match.files():
1116 1116 matched = set(changes[0] + changes[1] + changes[2])
1117 1117
1118 1118 for f in match.files():
1119 1119 if f == '.' or f in matched or f in wctx.substate:
1120 1120 continue
1121 1121 if f in changes[3]: # missing
1122 1122 fail(f, _('file not found!'))
1123 1123 if f in vdirs: # visited directory
1124 1124 d = f + '/'
1125 1125 for mf in matched:
1126 1126 if mf.startswith(d):
1127 1127 break
1128 1128 else:
1129 1129 fail(f, _("no match under directory!"))
1130 1130 elif f not in self.dirstate:
1131 1131 fail(f, _("file not tracked!"))
1132 1132
1133 1133 if (not force and not extra.get("close") and not merge
1134 1134 and not (changes[0] or changes[1] or changes[2])
1135 1135 and wctx.branch() == wctx.p1().branch()):
1136 1136 return None
1137 1137
1138 1138 ms = mergemod.mergestate(self)
1139 1139 for f in changes[0]:
1140 1140 if f in ms and ms[f] == 'u':
1141 1141 raise util.Abort(_("unresolved merge conflicts "
1142 1142 "(see hg help resolve)"))
1143 1143
1144 1144 cctx = context.workingctx(self, text, user, date, extra, changes)
1145 1145 if editor:
1146 1146 cctx._text = editor(self, cctx, subs)
1147 1147 edited = (text != cctx._text)
1148 1148
1149 1149 # commit subs
1150 1150 if subs or removedsubs:
1151 1151 state = wctx.substate.copy()
1152 1152 for s in sorted(subs):
1153 1153 sub = wctx.sub(s)
1154 1154 self.ui.status(_('committing subrepository %s\n') %
1155 1155 subrepo.subrelpath(sub))
1156 1156 sr = sub.commit(cctx._text, user, date)
1157 1157 state[s] = (state[s][0], sr)
1158 1158 subrepo.writestate(self, state)
1159 1159
1160 1160 # Save commit message in case this transaction gets rolled back
1161 1161 # (e.g. by a pretxncommit hook). Leave the content alone on
1162 1162 # the assumption that the user will use the same editor again.
1163 1163 msgfn = self.savecommitmessage(cctx._text)
1164 1164
1165 1165 p1, p2 = self.dirstate.parents()
1166 1166 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1167 1167 try:
1168 1168 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1169 1169 ret = self.commitctx(cctx, True)
1170 1170 except:
1171 1171 if edited:
1172 1172 self.ui.write(
1173 1173 _('note: commit message saved in %s\n') % msgfn)
1174 1174 raise
1175 1175
1176 1176 # update bookmarks, dirstate and mergestate
1177 1177 bookmarks.update(self, p1, ret)
1178 1178 for f in changes[0] + changes[1]:
1179 1179 self.dirstate.normal(f)
1180 1180 for f in changes[2]:
1181 1181 self.dirstate.drop(f)
1182 1182 self.dirstate.setparents(ret)
1183 1183 ms.reset()
1184 1184 finally:
1185 1185 wlock.release()
1186 1186
1187 1187 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1188 1188 return ret
1189 1189
1190 1190 def commitctx(self, ctx, error=False):
1191 1191 """Add a new revision to current repository.
1192 1192 Revision information is passed via the context argument.
1193 1193 """
1194 1194
1195 1195 tr = lock = None
1196 1196 removed = list(ctx.removed())
1197 1197 p1, p2 = ctx.p1(), ctx.p2()
1198 1198 user = ctx.user()
1199 1199
1200 1200 lock = self.lock()
1201 1201 try:
1202 1202 tr = self.transaction("commit")
1203 1203 trp = weakref.proxy(tr)
1204 1204
1205 1205 if ctx.files():
1206 1206 m1 = p1.manifest().copy()
1207 1207 m2 = p2.manifest()
1208 1208
1209 1209 # check in files
1210 1210 new = {}
1211 1211 changed = []
1212 1212 linkrev = len(self)
1213 1213 for f in sorted(ctx.modified() + ctx.added()):
1214 1214 self.ui.note(f + "\n")
1215 1215 try:
1216 1216 fctx = ctx[f]
1217 1217 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1218 1218 changed)
1219 1219 m1.set(f, fctx.flags())
1220 1220 except OSError, inst:
1221 1221 self.ui.warn(_("trouble committing %s!\n") % f)
1222 1222 raise
1223 1223 except IOError, inst:
1224 1224 errcode = getattr(inst, 'errno', errno.ENOENT)
1225 1225 if error or errcode and errcode != errno.ENOENT:
1226 1226 self.ui.warn(_("trouble committing %s!\n") % f)
1227 1227 raise
1228 1228 else:
1229 1229 removed.append(f)
1230 1230
1231 1231 # update manifest
1232 1232 m1.update(new)
1233 1233 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1234 1234 drop = [f for f in removed if f in m1]
1235 1235 for f in drop:
1236 1236 del m1[f]
1237 1237 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1238 1238 p2.manifestnode(), (new, drop))
1239 1239 files = changed + removed
1240 1240 else:
1241 1241 mn = p1.manifestnode()
1242 1242 files = []
1243 1243
1244 1244 # update changelog
1245 1245 self.changelog.delayupdate()
1246 1246 n = self.changelog.add(mn, files, ctx.description(),
1247 1247 trp, p1.node(), p2.node(),
1248 1248 user, ctx.date(), ctx.extra().copy())
1249 1249 p = lambda: self.changelog.writepending() and self.root or ""
1250 1250 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1251 1251 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1252 1252 parent2=xp2, pending=p)
1253 1253 self.changelog.finalize(trp)
1254 # ensure the new commit is 1-phase
1255 phases.retractboundary(self, 1, [n])
1254 # set the new commit to its proper phase
1255 targetphase = self.ui.configint('phases', 'new-commit', 1)
1256 if targetphase:
1257 # retracting the boundary does not alter parent changesets.
1258 # if a parent has a higher phase, the resulting phase will
1259 # be compliant anyway
1260 #
1261 # if the minimal phase is 0 we don't need to retract anything
1262 phases.retractboundary(self, targetphase, [n])
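A quick way to exercise the hunk above without editing a config file is to pass the option for a single invocation (value semantics as described before the diff; the message text is illustrative):

    hg commit --config phases.new-commit=2 -m 'start life as secret'

When the option is 0, the retractboundary call is skipped entirely, matching the comment above: a minimal phase of public never requires moving any phase roots.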
1256 1263 tr.close()
1257 1264
1258 1265 if self._branchcache:
1259 1266 self.updatebranchcache()
1260 1267 return n
1261 1268 finally:
1262 1269 if tr:
1263 1270 tr.release()
1264 1271 lock.release()
1265 1272
1266 1273 def destroyed(self):
1267 1274 '''Inform the repository that nodes have been destroyed.
1268 1275 Intended for use by strip and rollback, so there's a common
1269 1276 place for anything that has to be done after destroying history.'''
1270 1277 # XXX it might be nice if we could take the list of destroyed
1271 1278 # nodes, but I don't see an easy way for rollback() to do that
1272 1279
1273 1280 # Ensure the persistent tag cache is updated. Doing it now
1274 1281 # means that the tag cache only has to worry about destroyed
1275 1282 # heads immediately after a strip/rollback. That in turn
1276 1283 # guarantees that "cachetip == currenttip" (comparing both rev
1277 1284 # and node) always means no nodes have been added or destroyed.
1278 1285
1279 1286 # XXX this is suboptimal when qrefresh'ing: we strip the current
1280 1287 # head, refresh the tag cache, then immediately add a new head.
1281 1288 # But I think doing it this way is necessary for the "instant
1282 1289 # tag cache retrieval" case to work.
1283 1290 self.invalidatecaches()
1284 1291
1285 1292 def walk(self, match, node=None):
1286 1293 '''
1287 1294 walk recursively through the directory tree or a given
1288 1295 changeset, finding all files matched by the match
1289 1296 function
1290 1297 '''
1291 1298 return self[node].walk(match)
1292 1299
1293 1300 def status(self, node1='.', node2=None, match=None,
1294 1301 ignored=False, clean=False, unknown=False,
1295 1302 listsubrepos=False):
1296 1303 """return status of files between two nodes or node and working directory
1297 1304
1298 1305 If node1 is None, use the first dirstate parent instead.
1299 1306 If node2 is None, compare node1 with working directory.
1300 1307 """
1301 1308
1302 1309 def mfmatches(ctx):
1303 1310 mf = ctx.manifest().copy()
1304 1311 for fn in mf.keys():
1305 1312 if not match(fn):
1306 1313 del mf[fn]
1307 1314 return mf
1308 1315
1309 1316 if isinstance(node1, context.changectx):
1310 1317 ctx1 = node1
1311 1318 else:
1312 1319 ctx1 = self[node1]
1313 1320 if isinstance(node2, context.changectx):
1314 1321 ctx2 = node2
1315 1322 else:
1316 1323 ctx2 = self[node2]
1317 1324
1318 1325 working = ctx2.rev() is None
1319 1326 parentworking = working and ctx1 == self['.']
1320 1327 match = match or matchmod.always(self.root, self.getcwd())
1321 1328 listignored, listclean, listunknown = ignored, clean, unknown
1322 1329
1323 1330 # load earliest manifest first for caching reasons
1324 1331 if not working and ctx2.rev() < ctx1.rev():
1325 1332 ctx2.manifest()
1326 1333
1327 1334 if not parentworking:
1328 1335 def bad(f, msg):
1329 1336 if f not in ctx1:
1330 1337 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1331 1338 match.bad = bad
1332 1339
1333 1340 if working: # we need to scan the working dir
1334 1341 subrepos = []
1335 1342 if '.hgsub' in self.dirstate:
1336 1343 subrepos = ctx2.substate.keys()
1337 1344 s = self.dirstate.status(match, subrepos, listignored,
1338 1345 listclean, listunknown)
1339 1346 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1340 1347
1341 1348 # check for any possibly clean files
1342 1349 if parentworking and cmp:
1343 1350 fixup = []
1344 1351 # do a full compare of any files that might have changed
1345 1352 for f in sorted(cmp):
1346 1353 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1347 1354 or ctx1[f].cmp(ctx2[f])):
1348 1355 modified.append(f)
1349 1356 else:
1350 1357 fixup.append(f)
1351 1358
1352 1359 # update dirstate for files that are actually clean
1353 1360 if fixup:
1354 1361 if listclean:
1355 1362 clean += fixup
1356 1363
1357 1364 try:
1358 1365 # updating the dirstate is optional
1359 1366 # so we don't wait on the lock
1360 1367 wlock = self.wlock(False)
1361 1368 try:
1362 1369 for f in fixup:
1363 1370 self.dirstate.normal(f)
1364 1371 finally:
1365 1372 wlock.release()
1366 1373 except error.LockError:
1367 1374 pass
1368 1375
1369 1376 if not parentworking:
1370 1377 mf1 = mfmatches(ctx1)
1371 1378 if working:
1372 1379 # we are comparing working dir against non-parent
1373 1380 # generate a pseudo-manifest for the working dir
1374 1381 mf2 = mfmatches(self['.'])
1375 1382 for f in cmp + modified + added:
1376 1383 mf2[f] = None
1377 1384 mf2.set(f, ctx2.flags(f))
1378 1385 for f in removed:
1379 1386 if f in mf2:
1380 1387 del mf2[f]
1381 1388 else:
1382 1389 # we are comparing two revisions
1383 1390 deleted, unknown, ignored = [], [], []
1384 1391 mf2 = mfmatches(ctx2)
1385 1392
1386 1393 modified, added, clean = [], [], []
1387 1394 for fn in mf2:
1388 1395 if fn in mf1:
1389 1396 if (fn not in deleted and
1390 1397 (mf1.flags(fn) != mf2.flags(fn) or
1391 1398 (mf1[fn] != mf2[fn] and
1392 1399 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1393 1400 modified.append(fn)
1394 1401 elif listclean:
1395 1402 clean.append(fn)
1396 1403 del mf1[fn]
1397 1404 elif fn not in deleted:
1398 1405 added.append(fn)
1399 1406 removed = mf1.keys()
1400 1407
1401 1408 if working and modified and not self.dirstate._checklink:
1402 1409 # Symlink placeholders may get non-symlink-like contents
1403 1410 # via user error or dereferencing by NFS or Samba servers,
1404 1411 # so we filter out any placeholders that don't look like a
1405 1412 # symlink
1406 1413 sane = []
1407 1414 for f in modified:
1408 1415 if ctx2.flags(f) == 'l':
1409 1416 d = ctx2[f].data()
1410 1417 if len(d) >= 1024 or '\n' in d or util.binary(d):
1411 1418 self.ui.debug('ignoring suspect symlink placeholder'
1412 1419 ' "%s"\n' % f)
1413 1420 continue
1414 1421 sane.append(f)
1415 1422 modified = sane
1416 1423
1417 1424 r = modified, added, removed, deleted, unknown, ignored, clean
1418 1425
1419 1426 if listsubrepos:
1420 1427 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1421 1428 if working:
1422 1429 rev2 = None
1423 1430 else:
1424 1431 rev2 = ctx2.substate[subpath][1]
1425 1432 try:
1426 1433 submatch = matchmod.narrowmatcher(subpath, match)
1427 1434 s = sub.status(rev2, match=submatch, ignored=listignored,
1428 1435 clean=listclean, unknown=listunknown,
1429 1436 listsubrepos=True)
1430 1437 for rfiles, sfiles in zip(r, s):
1431 1438 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1432 1439 except error.LookupError:
1433 1440 self.ui.status(_("skipping missing subrepository: %s\n")
1434 1441 % subpath)
1435 1442
1436 1443 for l in r:
1437 1444 l.sort()
1438 1445 return r
1439 1446
1440 1447 def heads(self, start=None):
1441 1448 heads = self.changelog.heads(start)
1442 1449 # sort the output in rev descending order
1443 1450 return sorted(heads, key=self.changelog.rev, reverse=True)
1444 1451
1445 1452 def branchheads(self, branch=None, start=None, closed=False):
1446 1453 '''return a (possibly filtered) list of heads for the given branch
1447 1454
1448 1455 Heads are returned in topological order, from newest to oldest.
1449 1456 If branch is None, use the dirstate branch.
1450 1457 If start is not None, return only heads reachable from start.
1451 1458 If closed is True, return heads that are marked as closed as well.
1452 1459 '''
1453 1460 if branch is None:
1454 1461 branch = self[None].branch()
1455 1462 branches = self.branchmap()
1456 1463 if branch not in branches:
1457 1464 return []
1458 1465 # the cache returns heads ordered lowest to highest
1459 1466 bheads = list(reversed(branches[branch]))
1460 1467 if start is not None:
1461 1468 # filter out the heads that cannot be reached from startrev
1462 1469 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1463 1470 bheads = [h for h in bheads if h in fbheads]
1464 1471 if not closed:
1465 1472 bheads = [h for h in bheads if
1466 1473 ('close' not in self.changelog.read(h)[5])]
1467 1474 return bheads
1468 1475
1469 1476 def branches(self, nodes):
1470 1477 if not nodes:
1471 1478 nodes = [self.changelog.tip()]
1472 1479 b = []
1473 1480 for n in nodes:
1474 1481 t = n
1475 1482 while True:
1476 1483 p = self.changelog.parents(n)
1477 1484 if p[1] != nullid or p[0] == nullid:
1478 1485 b.append((t, n, p[0], p[1]))
1479 1486 break
1480 1487 n = p[0]
1481 1488 return b
1482 1489
1483 1490 def between(self, pairs):
1484 1491 r = []
1485 1492
1486 1493 for top, bottom in pairs:
1487 1494 n, l, i = top, [], 0
1488 1495 f = 1
1489 1496
1490 1497 while n != bottom and n != nullid:
1491 1498 p = self.changelog.parents(n)[0]
1492 1499 if i == f:
1493 1500 l.append(n)
1494 1501 f = f * 2
1495 1502 n = p
1496 1503 i += 1
1497 1504
1498 1505 r.append(l)
1499 1506
1500 1507 return r
1501 1508
1502 1509 def pull(self, remote, heads=None, force=False):
1503 1510 lock = self.lock()
1504 1511 try:
1505 1512 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1506 1513 force=force)
1507 1514 common, fetch, rheads = tmp
1508 1515 if not fetch:
1509 1516 self.ui.status(_("no changes found\n"))
1510 1517 added = []
1511 1518 result = 0
1512 1519 else:
1513 1520 if heads is None and list(common) == [nullid]:
1514 1521 self.ui.status(_("requesting all changes\n"))
1515 1522 elif heads is None and remote.capable('changegroupsubset'):
1516 1523 # issue1320, avoid a race if remote changed after discovery
1517 1524 heads = rheads
1518 1525
1519 1526 if remote.capable('getbundle'):
1520 1527 cg = remote.getbundle('pull', common=common,
1521 1528 heads=heads or rheads)
1522 1529 elif heads is None:
1523 1530 cg = remote.changegroup(fetch, 'pull')
1524 1531 elif not remote.capable('changegroupsubset'):
1525 1532 raise util.Abort(_("partial pull cannot be done because "
1526 1533 "other repository doesn't support "
1527 1534 "changegroupsubset."))
1528 1535 else:
1529 1536 cg = remote.changegroupsubset(fetch, heads, 'pull')
1530 1537 clstart = len(self.changelog)
1531 1538 result = self.addchangegroup(cg, 'pull', remote.url())
1532 1539 clend = len(self.changelog)
1533 1540 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1534 1541
1535 1542
1536 1543 # Get remote phases data from remote
1537 1544 remotephases = remote.listkeys('phases')
1538 1545 publishing = bool(remotephases.get('publishing', False))
1539 1546 if remotephases and not publishing:
1540 1547 # remote is new and non-publishing
1541 1548 subset = common + added
1542 1549 rheads, rroots = phases.analyzeremotephases(self, subset,
1543 1550 remotephases)
1544 1551 for phase, boundary in enumerate(rheads):
1545 1552 phases.advanceboundary(self, phase, boundary)
1546 1553 else:
1547 1554 # Remote is old or publishing all common changesets
1548 1555 # should be seen as public
1549 1556 phases.advanceboundary(self, 0, common + added)
1550 1557 finally:
1551 1558 lock.release()
1552 1559
1553 1560 return result
1554 1561
1555 1562 def checkpush(self, force, revs):
1556 1563 """Extensions can override this function if additional checks have
1557 1564 to be performed before pushing, or call it if they override push
1558 1565 command.
1559 1566 """
1560 1567 pass
1561 1568
1562 1569 def push(self, remote, force=False, revs=None, newbranch=False):
1563 1570 '''Push outgoing changesets (limited by revs) from the current
1564 1571 repository to remote. Return an integer:
1565 1572 - 0 means HTTP error *or* nothing to push
1566 1573 - 1 means we pushed and remote head count is unchanged *or*
1567 1574 we have outgoing changesets but refused to push
1568 1575 - other values as described by addchangegroup()
1569 1576 '''
1570 1577 # there are two ways to push to remote repo:
1571 1578 #
1572 1579 # addchangegroup assumes local user can lock remote
1573 1580 # repo (local filesystem, old ssh servers).
1574 1581 #
1575 1582 # unbundle assumes local user cannot lock remote repo (new ssh
1576 1583 # servers, http servers).
1577 1584
1578 1585 self.checkpush(force, revs)
1579 1586 lock = None
1580 1587 unbundle = remote.capable('unbundle')
1581 1588 if not unbundle:
1582 1589 lock = remote.lock()
1583 1590 try:
1584 1591 # get local lock as we might write phase data
1585 1592 locallock = self.lock()
1586 1593 try:
1587 1594 cg, remote_heads, fut = discovery.prepush(self, remote, force,
1588 1595 revs, newbranch)
1589 1596 ret = remote_heads
1590 1597 # create a callback for addchangegroup.
1591 1598 # It will be used in both branches of the conditional.
1592 1599 if cg is not None:
1593 1600 if unbundle:
1594 1601 # local repo finds heads on server, finds out what
1595 1602 # revs it must push. once revs transferred, if server
1596 1603 # finds it has different heads (someone else won
1597 1604 # commit/push race), server aborts.
1598 1605 if force:
1599 1606 remote_heads = ['force']
1600 1607 # ssh: return remote's addchangegroup()
1601 1608 # http: return remote's addchangegroup() or 0 for error
1602 1609 ret = remote.unbundle(cg, remote_heads, 'push')
1603 1610 else:
1604 1611 # we return an integer indicating remote head count change
1605 1612 ret = remote.addchangegroup(cg, 'push', self.url())
1606 1613
1607 1614 # even when we don't push, exchanging phase data is useful
1608 1615 remotephases = remote.listkeys('phases')
1609 1616 if not remotephases: # old server or public-only repo
1610 1617 phases.advanceboundary(self, 0, fut)
1611 1618 # don't push any phase data as there is nothing to push
1612 1619 else:
1613 1620 ana = phases.analyzeremotephases(self, fut, remotephases)
1614 1621 rheads, rroots = ana
1615 1622 ### Apply remote phase on local
1616 1623 if remotephases.get('publishing', False):
1617 1624 phases.advanceboundary(self, 0, fut)
1618 1625 else: # publish = False
1619 1626 for phase, rpheads in enumerate(rheads):
1620 1627 phases.advanceboundary(self, phase, rpheads)
1621 1628 ### Apply local phase on remote
1622 1629 #
1623 1630 # XXX If the push failed we should use the strict common set and
1624 1631 # not the future one, to avoid pushing phase data on unknown
1625 1632 # changesets. This is to be done later.
1626 1633 futctx = [self[n] for n in fut if n != nullid]
1627 1634 for phase in phases.trackedphases[::-1]:
1628 1635 prevphase = phase - 1
1629 1636 # get all candidates for heads in the previous phase
1630 1637 inprev = [ctx for ctx in futctx
1631 1638 if ctx.phase() == prevphase]
1632 1639 for newremotehead in self.set('heads(%ld & (%ln::))',
1633 1640 inprev, rroots[phase]):
1634 1641 r = remote.pushkey('phases',
1635 1642 newremotehead.hex(),
1636 1643 str(phase), str(prevphase))
1637 1644 if not r:
1638 1645 self.ui.warn(_('updating phase of %s '
1639 1646 'to %s failed!\n')
1640 1647 % (newremotehead, prevphase))
1641 1648 finally:
1642 1649 locallock.release()
1643 1650 finally:
1644 1651 if lock is not None:
1645 1652 lock.release()
1646 1653
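# Illustrative reading of the phase exchange above: with the tracked
# phases covering only draft (1), each new remote head triggers a single
# pushkey call of the form
#
#   remote.pushkey('phases', newremotehead.hex(), '1', '0')
#
# i.e. a request to move that node from draft (1) to public (0).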
1647 1654 self.ui.debug("checking for updated bookmarks\n")
1648 1655 rb = remote.listkeys('bookmarks')
1649 1656 for k in rb.keys():
1650 1657 if k in self._bookmarks:
1651 1658 nr, nl = rb[k], hex(self._bookmarks[k])
1652 1659 if nr in self:
1653 1660 cr = self[nr]
1654 1661 cl = self[nl]
1655 1662 if cl in cr.descendants():
1656 1663 r = remote.pushkey('bookmarks', k, nr, nl)
1657 1664 if r:
1658 1665 self.ui.status(_("updating bookmark %s\n") % k)
1659 1666 else:
1660 1667 self.ui.warn(_('updating bookmark %s'
1661 1668 ' failed!\n') % k)
1662 1669
1663 1670 return ret
1664 1671
1665 1672 def changegroupinfo(self, nodes, source):
1666 1673 if self.ui.verbose or source == 'bundle':
1667 1674 self.ui.status(_("%d changesets found\n") % len(nodes))
1668 1675 if self.ui.debugflag:
1669 1676 self.ui.debug("list of changesets:\n")
1670 1677 for node in nodes:
1671 1678 self.ui.debug("%s\n" % hex(node))
1672 1679
1673 1680 def changegroupsubset(self, bases, heads, source):
1674 1681 """Compute a changegroup consisting of all the nodes that are
1675 1682 descendants of any of the bases and ancestors of any of the heads.
1676 1683 Return a chunkbuffer object whose read() method will return
1677 1684 successive changegroup chunks.
1678 1685
1679 1686 It is fairly complex as determining which filenodes and which
1680 1687 manifest nodes need to be included for the changeset to be complete
1681 1688 is non-trivial.
1682 1689
1683 1690 Another wrinkle is doing the reverse, figuring out which changeset in
1684 1691 the changegroup a particular filenode or manifestnode belongs to.
1685 1692 """
1686 1693 cl = self.changelog
1687 1694 if not bases:
1688 1695 bases = [nullid]
1689 1696 csets, bases, heads = cl.nodesbetween(bases, heads)
1690 1697 # We assume that all ancestors of bases are known
1691 1698 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1692 1699 return self._changegroupsubset(common, csets, heads, source)
1693 1700
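# A small sketch of the degenerate case: with bases=[nullid] this
# produces a changegroup of the whole repository, which is exactly how
# changegroup() below uses it ('bundle' as the source tag is an
# illustrative choice).
#
#   cg = repo.changegroupsubset([nullid], repo.heads(), 'bundle')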
1694 1701 def getbundle(self, source, heads=None, common=None):
1695 1702 """Like changegroupsubset, but returns the set difference between the
1696 1703 ancestors of heads and the ancestors of common.
1697 1704
1698 1705 If heads is None, use the local heads. If common is None, use [nullid].
1699 1706
1700 1707 The nodes in common might not all be known locally due to the way the
1701 1708 current discovery protocol works.
1702 1709 """
1703 1710 cl = self.changelog
1704 1711 if common:
1705 1712 nm = cl.nodemap
1706 1713 common = [n for n in common if n in nm]
1707 1714 else:
1708 1715 common = [nullid]
1709 1716 if not heads:
1710 1717 heads = cl.heads()
1711 1718 common, missing = cl.findcommonmissing(common, heads)
1712 1719 if not missing:
1713 1720 return None
1714 1721 return self._changegroupsubset(common, missing, heads, source)
1715 1722
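# A hedged sketch of the set-difference semantics described above; the
# 'somenode' placeholder is an assumption for illustration.
#
#   cg = repo.getbundle('pull', heads=repo.heads(), common=[somenode])
#   # cg is None when ancestors(heads) - ancestors(common) is empty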
1716 1723 def _changegroupsubset(self, commonrevs, csets, heads, source):
1717 1724
1718 1725 cl = self.changelog
1719 1726 mf = self.manifest
1720 1727 mfs = {} # needed manifests
1721 1728 fnodes = {} # needed file nodes
1722 1729 changedfiles = set()
1723 1730 fstate = ['', {}]
1724 1731 count = [0]
1725 1732
1726 1733 # can we go through the fast path?
1727 1734 heads.sort()
1728 1735 if heads == sorted(self.heads()):
1729 1736 return self._changegroup(csets, source)
1730 1737
1731 1738 # slow path
1732 1739 self.hook('preoutgoing', throw=True, source=source)
1733 1740 self.changegroupinfo(csets, source)
1734 1741
1735 1742 # filter any nodes that claim to be part of the known set
1736 1743 def prune(revlog, missing):
1737 1744 return [n for n in missing
1738 1745 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1739 1746
1740 1747 def lookup(revlog, x):
1741 1748 if revlog == cl:
1742 1749 c = cl.read(x)
1743 1750 changedfiles.update(c[3])
1744 1751 mfs.setdefault(c[0], x)
1745 1752 count[0] += 1
1746 1753 self.ui.progress(_('bundling'), count[0],
1747 1754 unit=_('changesets'), total=len(csets))
1748 1755 return x
1749 1756 elif revlog == mf:
1750 1757 clnode = mfs[x]
1751 1758 mdata = mf.readfast(x)
1752 1759 for f in changedfiles:
1753 1760 if f in mdata:
1754 1761 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1755 1762 count[0] += 1
1756 1763 self.ui.progress(_('bundling'), count[0],
1757 1764 unit=_('manifests'), total=len(mfs))
1758 1765 return mfs[x]
1759 1766 else:
1760 1767 self.ui.progress(
1761 1768 _('bundling'), count[0], item=fstate[0],
1762 1769 unit=_('files'), total=len(changedfiles))
1763 1770 return fstate[1][x]
1764 1771
1765 1772 bundler = changegroup.bundle10(lookup)
1766 1773 reorder = self.ui.config('bundle', 'reorder', 'auto')
1767 1774 if reorder == 'auto':
1768 1775 reorder = None
1769 1776 else:
1770 1777 reorder = util.parsebool(reorder)
1771 1778
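# The reorder knob above comes from configuration; an illustrative hgrc
# (values are parsed by util.parsebool, and 'auto' maps to None here):
#
#   [bundle]
#   reorder = auto    # or true / false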
1772 1779 def gengroup():
1773 1780 # Create a changenode group generator that will call our functions
1774 1781 # back to lookup the owning changenode and collect information.
1775 1782 for chunk in cl.group(csets, bundler, reorder=reorder):
1776 1783 yield chunk
1777 1784 self.ui.progress(_('bundling'), None)
1778 1785
1779 1786 # Create a generator for the manifestnodes that calls our lookup
1780 1787 # and data collection functions back.
1781 1788 count[0] = 0
1782 1789 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1783 1790 yield chunk
1784 1791 self.ui.progress(_('bundling'), None)
1785 1792
1786 1793 mfs.clear()
1787 1794
1788 1795 # Go through all our files in order sorted by name.
1789 1796 count[0] = 0
1790 1797 for fname in sorted(changedfiles):
1791 1798 filerevlog = self.file(fname)
1792 1799 if not len(filerevlog):
1793 1800 raise util.Abort(_("empty or missing revlog for %s") % fname)
1794 1801 fstate[0] = fname
1795 1802 fstate[1] = fnodes.pop(fname, {})
1796 1803
1797 1804 nodelist = prune(filerevlog, fstate[1])
1798 1805 if nodelist:
1799 1806 count[0] += 1
1800 1807 yield bundler.fileheader(fname)
1801 1808 for chunk in filerevlog.group(nodelist, bundler, reorder):
1802 1809 yield chunk
1803 1810
1804 1811 # Signal that no more groups are left.
1805 1812 yield bundler.close()
1806 1813 self.ui.progress(_('bundling'), None)
1807 1814
1808 1815 if csets:
1809 1816 self.hook('outgoing', node=hex(csets[0]), source=source)
1810 1817
1811 1818 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1812 1819
1813 1820 def changegroup(self, basenodes, source):
1814 1821 # to avoid a race we use changegroupsubset() (issue1320)
1815 1822 return self.changegroupsubset(basenodes, self.heads(), source)
1816 1823
1817 1824 def _changegroup(self, nodes, source):
1818 1825 """Compute the changegroup of all nodes that we have that a recipient
1819 1826 doesn't. Return a chunkbuffer object whose read() method will return
1820 1827 successive changegroup chunks.
1821 1828
1822 1829 This is much easier than the previous function as we can assume that
1823 1830 the recipient has any changenode we aren't sending them.
1824 1831
1825 1832 nodes is the set of nodes to send"""
1826 1833
1827 1834 cl = self.changelog
1828 1835 mf = self.manifest
1829 1836 mfs = {}
1830 1837 changedfiles = set()
1831 1838 fstate = ['']
1832 1839 count = [0]
1833 1840
1834 1841 self.hook('preoutgoing', throw=True, source=source)
1835 1842 self.changegroupinfo(nodes, source)
1836 1843
1837 1844 revset = set([cl.rev(n) for n in nodes])
1838 1845
1839 1846 def gennodelst(log):
1840 1847 return [log.node(r) for r in log if log.linkrev(r) in revset]
1841 1848
1842 1849 def lookup(revlog, x):
1843 1850 if revlog == cl:
1844 1851 c = cl.read(x)
1845 1852 changedfiles.update(c[3])
1846 1853 mfs.setdefault(c[0], x)
1847 1854 count[0] += 1
1848 1855 self.ui.progress(_('bundling'), count[0],
1849 1856 unit=_('changesets'), total=len(nodes))
1850 1857 return x
1851 1858 elif revlog == mf:
1852 1859 count[0] += 1
1853 1860 self.ui.progress(_('bundling'), count[0],
1854 1861 unit=_('manifests'), total=len(mfs))
1855 1862 return cl.node(revlog.linkrev(revlog.rev(x)))
1856 1863 else:
1857 1864 self.ui.progress(
1858 1865 _('bundling'), count[0], item=fstate[0],
1859 1866 total=len(changedfiles), unit=_('files'))
1860 1867 return cl.node(revlog.linkrev(revlog.rev(x)))
1861 1868
1862 1869 bundler = changegroup.bundle10(lookup)
1863 1870 reorder = self.ui.config('bundle', 'reorder', 'auto')
1864 1871 if reorder == 'auto':
1865 1872 reorder = None
1866 1873 else:
1867 1874 reorder = util.parsebool(reorder)
1868 1875
1869 1876 def gengroup():
1870 1877 '''yield a sequence of changegroup chunks (strings)'''
1871 1878 # construct a list of all changed files
1872 1879
1873 1880 for chunk in cl.group(nodes, bundler, reorder=reorder):
1874 1881 yield chunk
1875 1882 self.ui.progress(_('bundling'), None)
1876 1883
1877 1884 count[0] = 0
1878 1885 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1879 1886 yield chunk
1880 1887 self.ui.progress(_('bundling'), None)
1881 1888
1882 1889 count[0] = 0
1883 1890 for fname in sorted(changedfiles):
1884 1891 filerevlog = self.file(fname)
1885 1892 if not len(filerevlog):
1886 1893 raise util.Abort(_("empty or missing revlog for %s") % fname)
1887 1894 fstate[0] = fname
1888 1895 nodelist = gennodelst(filerevlog)
1889 1896 if nodelist:
1890 1897 count[0] += 1
1891 1898 yield bundler.fileheader(fname)
1892 1899 for chunk in filerevlog.group(nodelist, bundler, reorder):
1893 1900 yield chunk
1894 1901 yield bundler.close()
1895 1902 self.ui.progress(_('bundling'), None)
1896 1903
1897 1904 if nodes:
1898 1905 self.hook('outgoing', node=hex(nodes[0]), source=source)
1899 1906
1900 1907 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1901 1908
1902 1909 def addchangegroup(self, source, srctype, url, emptyok=False):
1903 1910 """Add the changegroup returned by source.read() to this repo.
1904 1911 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1905 1912 the URL of the repo where this changegroup is coming from.
1906 1913
1907 1914 Return an integer summarizing the change to this repo:
1908 1915 - nothing changed or no source: 0
1909 1916 - more heads than before: 1+added heads (2..n)
1910 1917 - fewer heads than before: -1-removed heads (-2..-n)
1911 1918 - number of heads stays the same: 1
1912 1919 """
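# A sketch of decoding the return value per the contract above (names
# are illustrative):
#
#   ret = repo.addchangegroup(cg, 'pull', url)
#   if ret == 0:    # nothing changed, or no source
#       pass
#   elif ret > 1:   # ret - 1 heads were added
#       pass
#   elif ret < 0:   # -ret - 1 heads were removed
#       pass
#   # ret == 1: head count unchanged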
1913 1920 def csmap(x):
1914 1921 self.ui.debug("add changeset %s\n" % short(x))
1915 1922 return len(cl)
1916 1923
1917 1924 def revmap(x):
1918 1925 return cl.rev(x)
1919 1926
1920 1927 if not source:
1921 1928 return 0
1922 1929
1923 1930 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1924 1931
1925 1932 changesets = files = revisions = 0
1926 1933 efiles = set()
1927 1934
1928 1935 # write changelog data to temp files so concurrent readers will not see
1929 1936 # inconsistent view
1930 1937 cl = self.changelog
1931 1938 cl.delayupdate()
1932 1939 oldheads = cl.heads()
1933 1940
1934 1941 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1935 1942 try:
1936 1943 trp = weakref.proxy(tr)
1937 1944 # pull off the changeset group
1938 1945 self.ui.status(_("adding changesets\n"))
1939 1946 clstart = len(cl)
1940 1947 class prog(object):
1941 1948 step = _('changesets')
1942 1949 count = 1
1943 1950 ui = self.ui
1944 1951 total = None
1945 1952 def __call__(self):
1946 1953 self.ui.progress(self.step, self.count, unit=_('chunks'),
1947 1954 total=self.total)
1948 1955 self.count += 1
1949 1956 pr = prog()
1950 1957 source.callback = pr
1951 1958
1952 1959 source.changelogheader()
1953 1960 if (cl.addgroup(source, csmap, trp) is None
1954 1961 and not emptyok):
1955 1962 raise util.Abort(_("received changelog group is empty"))
1956 1963 clend = len(cl)
1957 1964 changesets = clend - clstart
1958 1965 for c in xrange(clstart, clend):
1959 1966 efiles.update(self[c].files())
1960 1967 efiles = len(efiles)
1961 1968 self.ui.progress(_('changesets'), None)
1962 1969
1963 1970 # pull off the manifest group
1964 1971 self.ui.status(_("adding manifests\n"))
1965 1972 pr.step = _('manifests')
1966 1973 pr.count = 1
1967 1974 pr.total = changesets # manifests <= changesets
1968 1975 # no need to check for empty manifest group here:
1969 1976 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1970 1977 # no new manifest will be created and the manifest group will
1971 1978 # be empty during the pull
1972 1979 source.manifestheader()
1973 1980 self.manifest.addgroup(source, revmap, trp)
1974 1981 self.ui.progress(_('manifests'), None)
1975 1982
1976 1983 needfiles = {}
1977 1984 if self.ui.configbool('server', 'validate', default=False):
1978 1985 # validate incoming csets have their manifests
1979 1986 for cset in xrange(clstart, clend):
1980 1987 mfest = self.changelog.read(self.changelog.node(cset))[0]
1981 1988 mfest = self.manifest.readdelta(mfest)
1982 1989 # store file nodes we must see
1983 1990 for f, n in mfest.iteritems():
1984 1991 needfiles.setdefault(f, set()).add(n)
1985 1992
1986 1993 # process the files
1987 1994 self.ui.status(_("adding file changes\n"))
1988 1995 pr.step = _('files')
1989 1996 pr.count = 1
1990 1997 pr.total = efiles
1991 1998 source.callback = None
1992 1999
1993 2000 while True:
1994 2001 chunkdata = source.filelogheader()
1995 2002 if not chunkdata:
1996 2003 break
1997 2004 f = chunkdata["filename"]
1998 2005 self.ui.debug("adding %s revisions\n" % f)
1999 2006 pr()
2000 2007 fl = self.file(f)
2001 2008 o = len(fl)
2002 2009 if fl.addgroup(source, revmap, trp) is None:
2003 2010 raise util.Abort(_("received file revlog group is empty"))
2004 2011 revisions += len(fl) - o
2005 2012 files += 1
2006 2013 if f in needfiles:
2007 2014 needs = needfiles[f]
2008 2015 for new in xrange(o, len(fl)):
2009 2016 n = fl.node(new)
2010 2017 if n in needs:
2011 2018 needs.remove(n)
2012 2019 if not needs:
2013 2020 del needfiles[f]
2014 2021 self.ui.progress(_('files'), None)
2015 2022
2016 2023 for f, needs in needfiles.iteritems():
2017 2024 fl = self.file(f)
2018 2025 for n in needs:
2019 2026 try:
2020 2027 fl.rev(n)
2021 2028 except error.LookupError:
2022 2029 raise util.Abort(
2023 2030 _('missing file data for %s:%s - run hg verify') %
2024 2031 (f, hex(n)))
2025 2032
2026 2033 dh = 0
2027 2034 if oldheads:
2028 2035 heads = cl.heads()
2029 2036 dh = len(heads) - len(oldheads)
2030 2037 for h in heads:
2031 2038 if h not in oldheads and 'close' in self[h].extra():
2032 2039 dh -= 1
2033 2040 htext = ""
2034 2041 if dh:
2035 2042 htext = _(" (%+d heads)") % dh
2036 2043
2037 2044 self.ui.status(_("added %d changesets"
2038 2045 " with %d changes to %d files%s\n")
2039 2046 % (changesets, revisions, files, htext))
2040 2047
2041 2048 if changesets > 0:
2042 2049 p = lambda: cl.writepending() and self.root or ""
2043 2050 self.hook('pretxnchangegroup', throw=True,
2044 2051 node=hex(cl.node(clstart)), source=srctype,
2045 2052 url=url, pending=p)
2046 2053
2047 2054 added = [cl.node(r) for r in xrange(clstart, clend)]
2048 2055 publishing = self.ui.configbool('phases', 'publish', True)
2049 2056 if publishing and srctype == 'push':
2050 2057 # Old servers cannot push the boundary themselves.
2051 2058 # This clause ensures pushed changesets are always marked as public.
2052 2059 phases.advanceboundary(self, 0, added)
2053 2060 elif srctype != 'strip': # strip should not touch boundary at all
2054 2061 phases.retractboundary(self, 1, added)
2055 2062
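# The branch above is driven by configuration; an illustrative hgrc for
# a non-publishing server (incoming changesets stay draft):
#
#   [phases]
#   publish = False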
2056 2063 # make changelog see real files again
2057 2064 cl.finalize(trp)
2058 2065
2059 2066 tr.close()
2060 2067
2061 2068 if changesets > 0:
2062 2069 def runhooks():
2063 2070 # forcefully update the on-disk branch cache
2064 2071 self.ui.debug("updating the branch cache\n")
2065 2072 self.updatebranchcache()
2066 2073 self.hook("changegroup", node=hex(cl.node(clstart)),
2067 2074 source=srctype, url=url)
2068 2075
2069 2076 for n in added:
2070 2077 self.hook("incoming", node=hex(n), source=srctype,
2071 2078 url=url)
2072 2079 self._afterlock(runhooks)
2073 2080
2074 2081 finally:
2075 2082 tr.release()
2076 2083 # never return 0 here:
2077 2084 if dh < 0:
2078 2085 return dh - 1
2079 2086 else:
2080 2087 return dh + 1
2081 2088
2082 2089 def stream_in(self, remote, requirements):
2083 2090 lock = self.lock()
2084 2091 try:
2085 2092 fp = remote.stream_out()
2086 2093 l = fp.readline()
2087 2094 try:
2088 2095 resp = int(l)
2089 2096 except ValueError:
2090 2097 raise error.ResponseError(
2091 2098 _('Unexpected response from remote server:'), l)
2092 2099 if resp == 1:
2093 2100 raise util.Abort(_('operation forbidden by server'))
2094 2101 elif resp == 2:
2095 2102 raise util.Abort(_('locking the remote repository failed'))
2096 2103 elif resp != 0:
2097 2104 raise util.Abort(_('the server sent an unknown error code'))
2098 2105 self.ui.status(_('streaming all changes\n'))
2099 2106 l = fp.readline()
2100 2107 try:
2101 2108 total_files, total_bytes = map(int, l.split(' ', 1))
2102 2109 except (ValueError, TypeError):
2103 2110 raise error.ResponseError(
2104 2111 _('Unexpected response from remote server:'), l)
2105 2112 self.ui.status(_('%d files to transfer, %s of data\n') %
2106 2113 (total_files, util.bytecount(total_bytes)))
2107 2114 start = time.time()
2108 2115 for i in xrange(total_files):
2109 2116 # XXX doesn't support '\n' or '\r' in filenames
2110 2117 l = fp.readline()
2111 2118 try:
2112 2119 name, size = l.split('\0', 1)
2113 2120 size = int(size)
2114 2121 except (ValueError, TypeError):
2115 2122 raise error.ResponseError(
2116 2123 _('Unexpected response from remote server:'), l)
2117 2124 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2118 2125 # for backwards compat, name was partially encoded
2119 2126 ofp = self.sopener(store.decodedir(name), 'w')
2120 2127 for chunk in util.filechunkiter(fp, limit=size):
2121 2128 ofp.write(chunk)
2122 2129 ofp.close()
2123 2130 elapsed = time.time() - start
2124 2131 if elapsed <= 0:
2125 2132 elapsed = 0.001
2126 2133 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2127 2134 (util.bytecount(total_bytes), elapsed,
2128 2135 util.bytecount(total_bytes / elapsed)))
2129 2136
2130 2137 # new requirements = old non-format requirements + new format-related
2131 2138 # requirements from the streamed-in repository
2132 2139 requirements.update(set(self.requirements) - self.supportedformats)
2133 2140 self._applyrequirements(requirements)
2134 2141 self._writerequirements()
2135 2142
2136 2143 self.invalidate()
2137 2144 return len(self.heads()) + 1
2138 2145 finally:
2139 2146 lock.release()
2140 2147
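# A sketch of the stream_out wire format as parsed above (layout shown
# for illustration only):
#
#   <resp>\n                        0 = ok, 1 = forbidden, 2 = lock failed
#   <total_files> <total_bytes>\n
#   then, per file:
#   <name>\0<size>\n                followed by exactly <size> raw bytes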
2141 2148 def clone(self, remote, heads=None, stream=False):
2142 2149 '''clone remote repository.
2143 2150
2144 2151 keyword arguments:
2145 2152 heads: list of revs to clone (forces use of pull)
2146 2153 stream: use streaming clone if possible'''
2147 2154
2148 2155 # now, all clients that can request uncompressed clones can
2149 2156 # read repo formats supported by all servers that can serve
2150 2157 # them.
2151 2158
2152 2159 # if revlog format changes, client will have to check version
2153 2160 # and format flags on "stream" capability, and use
2154 2161 # uncompressed only if compatible.
2155 2162
2156 2163 if stream and not heads:
2157 2164 # 'stream' means remote revlog format is revlogv1 only
2158 2165 if remote.capable('stream'):
2159 2166 return self.stream_in(remote, set(('revlogv1',)))
2160 2167 # otherwise, 'streamreqs' contains the remote revlog format
2161 2168 streamreqs = remote.capable('streamreqs')
2162 2169 if streamreqs:
2163 2170 streamreqs = set(streamreqs.split(','))
2164 2171 # if we support it, stream in and adjust our requirements
2165 2172 if not streamreqs - self.supportedformats:
2166 2173 return self.stream_in(remote, streamreqs)
2167 2174 return self.pull(remote, heads)
2168 2175
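# An illustrative negotiation, assuming the remote advertises
# 'streamreqs=revlogv1,generaldelta': with both formats locally
# supported, clone() streams and adopts those requirements; otherwise
# it falls back to pull().
#
#   repo.clone(other, stream=True)  # may still pull if formats differ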
2169 2176 def pushkey(self, namespace, key, old, new):
2170 2177 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2171 2178 old=old, new=new)
2172 2179 ret = pushkey.push(self, namespace, key, old, new)
2173 2180 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2174 2181 ret=ret)
2175 2182 return ret
2176 2183
2177 2184 def listkeys(self, namespace):
2178 2185 self.hook('prelistkeys', throw=True, namespace=namespace)
2179 2186 values = pushkey.list(self, namespace)
2180 2187 self.hook('listkeys', namespace=namespace, values=values)
2181 2188 return values
2182 2189
2183 2190 def debugwireargs(self, one, two, three=None, four=None, five=None):
2184 2191 '''used to test argument passing over the wire'''
2185 2192 return "%s %s %s %s %s" % (one, two, three, four, five)
2186 2193
2187 2194 def savecommitmessage(self, text):
2188 2195 fp = self.opener('last-message.txt', 'wb')
2189 2196 try:
2190 2197 fp.write(text)
2191 2198 finally:
2192 2199 fp.close()
2193 2200 return self.pathto(fp.name[len(self.root)+1:])
2194 2201
2195 2202 # used to avoid circular references so destructors work
2196 2203 def aftertrans(files):
2197 2204 renamefiles = [tuple(t) for t in files]
2198 2205 def a():
2199 2206 for src, dest in renamefiles:
2200 2207 util.rename(src, dest)
2201 2208 return a
2202 2209
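# A hedged usage sketch: aftertrans() is handed to a transaction as its
# post-close callback so that a plain closure, not a bound method, holds
# the file list (avoiding a reference cycle); the journal/undo pairing
# is an assumption based on undoname() below.
#
#   after = aftertrans([(journalpath, undoname(journalpath))])
#   # once the transaction completes, 'journal*' files become 'undo*'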
2203 2210 def undoname(fn):
2204 2211 base, name = os.path.split(fn)
2205 2212 assert name.startswith('journal')
2206 2213 return os.path.join(base, name.replace('journal', 'undo', 1))
2207 2214
2208 2215 def instance(ui, path, create):
2209 2216 return localrepository(ui, util.urllocalpath(path), create)
2210 2217
2211 2218 def islocal(path):
2212 2219 return True