localrepo: add "vfs" fields to "localrepository" for migration from "opener"...
FUJIWARA Katsunori
r17156:70343650 default
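The change itself is mechanical: each opener attribute gains a "vfs"-named
alias bound to the same object, so callers can migrate to repo.vfs,
repo.wvfs and repo.svfs while the old opener/wopener/sopener names keep
working. A minimal sketch of the two equivalent access styles (illustrative
only; assumes an existing localrepository instance named repo):

    # old style, via the opener fields
    data = repo.opener.read('bookmarks')
    # new style, via the vfs aliases added by this changeset
    data = repo.vfs.read('bookmarks')
    # the aliases are the same objects, not copies
    assert repo.vfs is repo.opener
    assert repo.wvfs is repo.wopener
    assert repo.svfs is repo.sopener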
@@ -1,2454 +1,2457
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import repo, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
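# A note on the caching decorators above (a sketch of the mechanism as it
# is used in this file, not part of this changeset): filecache invalidates
# a cached property when the backing file under .hg/ changes; storecache
# only overrides join() so the watched path resolves through sjoin(), i.e.
# under the store. Illustrative use, as seen later in this file:
#
#     @storecache('00changelog.i')
#     def changelog(self):
#         ...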
26 26 class localrepository(repo.repository):
27 27 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
28 28 'known', 'getbundle'))
29 29 supportedformats = set(('revlogv1', 'generaldelta'))
30 30 supported = supportedformats | set(('store', 'fncache', 'shared',
31 31 'dotencode'))
32 32 openerreqs = set(('revlogv1', 'generaldelta'))
33 33 requirements = ['revlogv1']
34 34
35 35 def _baserequirements(self, create):
36 36 return self.requirements[:]
37 37
38 38 def __init__(self, baseui, path=None, create=False):
39 39 repo.repository.__init__(self)
40 40 self.root = os.path.realpath(util.expandpath(path))
41 41 self.path = os.path.join(self.root, ".hg")
42 42 self.origroot = path
43 43 self.auditor = scmutil.pathauditor(self.root, self._checknested)
44 44 self.opener = scmutil.opener(self.path)
45 self.vfs = self.opener
45 46 self.wopener = scmutil.opener(self.root)
47 self.wvfs = self.wopener
46 48 self.baseui = baseui
47 49 self.ui = baseui.copy()
48 50 # A list of callbacks to shape the phase if no data were found.
49 51 # Callbacks are in the form: func(repo, roots) --> processed root.
50 52 # This list is to be filled by extensions during repo setup.
51 53 self._phasedefaults = []
52 54
53 55 try:
54 56 self.ui.readconfig(self.join("hgrc"), self.root)
55 57 extensions.loadall(self.ui)
56 58 except IOError:
57 59 pass
58 60
59 61 if not os.path.isdir(self.path):
60 62 if create:
61 63 if not os.path.exists(path):
62 64 util.makedirs(path)
63 65 util.makedir(self.path, notindexed=True)
64 66 requirements = self._baserequirements(create)
65 67 if self.ui.configbool('format', 'usestore', True):
66 68 os.mkdir(os.path.join(self.path, "store"))
67 69 requirements.append("store")
68 70 if self.ui.configbool('format', 'usefncache', True):
69 71 requirements.append("fncache")
70 72 if self.ui.configbool('format', 'dotencode', True):
71 73 requirements.append('dotencode')
72 74 # create an invalid changelog
73 75 self.opener.append(
74 76 "00changelog.i",
75 77 '\0\0\0\2' # represents revlogv2
76 78 ' dummy changelog to prevent using the old repo layout'
77 79 )
78 80 if self.ui.configbool('format', 'generaldelta', False):
79 81 requirements.append("generaldelta")
80 82 requirements = set(requirements)
81 83 else:
82 84 raise error.RepoError(_("repository %s not found") % path)
83 85 elif create:
84 86 raise error.RepoError(_("repository %s already exists") % path)
85 87 else:
86 88 try:
87 89 requirements = scmutil.readrequires(self.opener, self.supported)
88 90 except IOError, inst:
89 91 if inst.errno != errno.ENOENT:
90 92 raise
91 93 requirements = set()
92 94
93 95 self.sharedpath = self.path
94 96 try:
95 97 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
96 98 if not os.path.exists(s):
97 99 raise error.RepoError(
98 100 _('.hg/sharedpath points to nonexistent directory %s') % s)
99 101 self.sharedpath = s
100 102 except IOError, inst:
101 103 if inst.errno != errno.ENOENT:
102 104 raise
103 105
104 106 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
105 107 self.spath = self.store.path
106 108 self.sopener = self.store.opener
109 self.svfs = self.sopener
107 110 self.sjoin = self.store.join
108 111 self.opener.createmode = self.store.createmode
109 112 self._applyrequirements(requirements)
110 113 if create:
111 114 self._writerequirements()
112 115
113 116
114 117 self._branchcache = None
115 118 self._branchcachetip = None
116 119 self.filterpats = {}
117 120 self._datafilters = {}
118 121 self._transref = self._lockref = self._wlockref = None
119 122
120 123 # A cache for various files under .hg/ that tracks file changes,
121 124 # (used by the filecache decorator)
122 125 #
123 126 # Maps a property name to its util.filecacheentry
124 127 self._filecache = {}
125 128
126 129 def _applyrequirements(self, requirements):
127 130 self.requirements = requirements
128 131 self.sopener.options = dict((r, 1) for r in requirements
129 132 if r in self.openerreqs)
130 133
131 134 def _writerequirements(self):
132 135 reqfile = self.opener("requires", "w")
133 136 for r in self.requirements:
134 137 reqfile.write("%s\n" % r)
135 138 reqfile.close()
136 139
137 140 def _checknested(self, path):
138 141 """Determine if path is a legal nested repository."""
139 142 if not path.startswith(self.root):
140 143 return False
141 144 subpath = path[len(self.root) + 1:]
142 145 normsubpath = util.pconvert(subpath)
143 146
144 147 # XXX: Checking against the current working copy is wrong in
145 148 # the sense that it can reject things like
146 149 #
147 150 # $ hg cat -r 10 sub/x.txt
148 151 #
149 152 # if sub/ is no longer a subrepository in the working copy
150 153 # parent revision.
151 154 #
152 155 # However, it can of course also allow things that would have
153 156 # been rejected before, such as the above cat command if sub/
154 157 # is a subrepository now, but was a normal directory before.
155 158 # The old path auditor would have rejected it by mistake since it
156 159 # panics when it sees sub/.hg/.
157 160 #
158 161 # All in all, checking against the working copy seems sensible
159 162 # since we want to prevent access to nested repositories on
160 163 # the filesystem *now*.
161 164 ctx = self[None]
162 165 parts = util.splitpath(subpath)
163 166 while parts:
164 167 prefix = '/'.join(parts)
165 168 if prefix in ctx.substate:
166 169 if prefix == normsubpath:
167 170 return True
168 171 else:
169 172 sub = ctx.sub(prefix)
170 173 return sub.checknested(subpath[len(prefix) + 1:])
171 174 else:
172 175 parts.pop()
173 176 return False
174 177
175 178 @filecache('bookmarks')
176 179 def _bookmarks(self):
177 180 return bookmarks.read(self)
178 181
179 182 @filecache('bookmarks.current')
180 183 def _bookmarkcurrent(self):
181 184 return bookmarks.readcurrent(self)
182 185
183 186 def _writebookmarks(self, marks):
184 187 bookmarks.write(self)
185 188
186 189 def bookmarkheads(self, bookmark):
187 190 name = bookmark.split('@', 1)[0]
188 191 heads = []
189 192 for mark, n in self._bookmarks.iteritems():
190 193 if mark.split('@', 1)[0] == name:
191 194 heads.append(n)
192 195 return heads
193 196
194 197 @storecache('phaseroots')
195 198 def _phasecache(self):
196 199 return phases.phasecache(self, self._phasedefaults)
197 200
198 201 @storecache('obsstore')
199 202 def obsstore(self):
200 203 store = obsolete.obsstore(self.sopener)
201 204 return store
202 205
203 206 @storecache('00changelog.i')
204 207 def changelog(self):
205 208 c = changelog.changelog(self.sopener)
206 209 if 'HG_PENDING' in os.environ:
207 210 p = os.environ['HG_PENDING']
208 211 if p.startswith(self.root):
209 212 c.readpending('00changelog.i.a')
210 213 return c
211 214
212 215 @storecache('00manifest.i')
213 216 def manifest(self):
214 217 return manifest.manifest(self.sopener)
215 218
216 219 @filecache('dirstate')
217 220 def dirstate(self):
218 221 warned = [0]
219 222 def validate(node):
220 223 try:
221 224 self.changelog.rev(node)
222 225 return node
223 226 except error.LookupError:
224 227 if not warned[0]:
225 228 warned[0] = True
226 229 self.ui.warn(_("warning: ignoring unknown"
227 230 " working parent %s!\n") % short(node))
228 231 return nullid
229 232
230 233 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231 234
232 235 def __getitem__(self, changeid):
233 236 if changeid is None:
234 237 return context.workingctx(self)
235 238 return context.changectx(self, changeid)
236 239
237 240 def __contains__(self, changeid):
238 241 try:
239 242 return bool(self.lookup(changeid))
240 243 except error.RepoLookupError:
241 244 return False
242 245
243 246 def __nonzero__(self):
244 247 return True
245 248
246 249 def __len__(self):
247 250 return len(self.changelog)
248 251
249 252 def __iter__(self):
250 253 for i in xrange(len(self)):
251 254 yield i
252 255
253 256 def revs(self, expr, *args):
254 257 '''Return a list of revisions matching the given revset'''
255 258 expr = revset.formatspec(expr, *args)
256 259 m = revset.match(None, expr)
257 260 return [r for r in m(self, range(len(self)))]
258 261
259 262 def set(self, expr, *args):
260 263 '''
261 264 Yield a context for each matching revision, after doing arg
262 265 replacement via revset.formatspec
263 266 '''
264 267 for r in self.revs(expr, *args):
265 268 yield self[r]
266 269
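# Illustrative callers for revs() and set() above (not part of this
# changeset; assumes 'repo' and a ui object are in scope):
#
#     for rev in repo.revs('head() and not closed()'):
#         ...
#     for ctx in repo.set('heads(branch(%s))', 'default'):
#         ui.write(ctx.hex() + '\n')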
267 270 def url(self):
268 271 return 'file:' + self.root
269 272
270 273 def hook(self, name, throw=False, **args):
271 274 return hook.hook(self.ui, self, name, throw, **args)
272 275
273 276 tag_disallowed = ':\r\n'
274 277
275 278 def _tag(self, names, node, message, local, user, date, extra={}):
276 279 if isinstance(names, str):
277 280 allchars = names
278 281 names = (names,)
279 282 else:
280 283 allchars = ''.join(names)
281 284 for c in self.tag_disallowed:
282 285 if c in allchars:
283 286 raise util.Abort(_('%r cannot be used in a tag name') % c)
284 287
285 288 branches = self.branchmap()
286 289 for name in names:
287 290 self.hook('pretag', throw=True, node=hex(node), tag=name,
288 291 local=local)
289 292 if name in branches:
290 293 self.ui.warn(_("warning: tag %s conflicts with existing"
291 294 " branch name\n") % name)
292 295
293 296 def writetags(fp, names, munge, prevtags):
294 297 fp.seek(0, 2)
295 298 if prevtags and prevtags[-1] != '\n':
296 299 fp.write('\n')
297 300 for name in names:
298 301 m = munge and munge(name) or name
299 302 if (self._tagscache.tagtypes and
300 303 name in self._tagscache.tagtypes):
301 304 old = self.tags().get(name, nullid)
302 305 fp.write('%s %s\n' % (hex(old), m))
303 306 fp.write('%s %s\n' % (hex(node), m))
304 307 fp.close()
305 308
306 309 prevtags = ''
307 310 if local:
308 311 try:
309 312 fp = self.opener('localtags', 'r+')
310 313 except IOError:
311 314 fp = self.opener('localtags', 'a')
312 315 else:
313 316 prevtags = fp.read()
314 317
315 318 # local tags are stored in the current charset
316 319 writetags(fp, names, None, prevtags)
317 320 for name in names:
318 321 self.hook('tag', node=hex(node), tag=name, local=local)
319 322 return
320 323
321 324 try:
322 325 fp = self.wfile('.hgtags', 'rb+')
323 326 except IOError, e:
324 327 if e.errno != errno.ENOENT:
325 328 raise
326 329 fp = self.wfile('.hgtags', 'ab')
327 330 else:
328 331 prevtags = fp.read()
329 332
330 333 # committed tags are stored in UTF-8
331 334 writetags(fp, names, encoding.fromlocal, prevtags)
332 335
333 336 fp.close()
334 337
335 338 self.invalidatecaches()
336 339
337 340 if '.hgtags' not in self.dirstate:
338 341 self[None].add(['.hgtags'])
339 342
340 343 m = matchmod.exact(self.root, '', ['.hgtags'])
341 344 tagnode = self.commit(message, user, date, extra=extra, match=m)
342 345
343 346 for name in names:
344 347 self.hook('tag', node=hex(node), tag=name, local=local)
345 348
346 349 return tagnode
347 350
348 351 def tag(self, names, node, message, local, user, date):
349 352 '''tag a revision with one or more symbolic names.
350 353
351 354 names is a list of strings or, when adding a single tag, names may be a
352 355 string.
353 356
354 357 if local is True, the tags are stored in a per-repository file.
355 358 otherwise, they are stored in the .hgtags file, and a new
356 359 changeset is committed with the change.
357 360
358 361 keyword arguments:
359 362
360 363 local: whether to store tags in non-version-controlled file
361 364 (default False)
362 365
363 366 message: commit message to use if committing
364 367
365 368 user: name of user to use if committing
366 369
367 370 date: date tuple to use if committing'''
368 371
369 372 if not local:
370 373 for x in self.status()[:5]:
371 374 if '.hgtags' in x:
372 375 raise util.Abort(_('working copy of .hgtags is changed '
373 376 '(please commit .hgtags manually)'))
374 377
375 378 self.tags() # instantiate the cache
376 379 self._tag(names, node, message, local, user, date)
377 380
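# Illustrative caller for tag() above (not part of this changeset; 'node'
# is assumed to be the binary id of a changeset already in the repository):
#
#     repo.tag(['v1.0'], node, 'Added tag v1.0', False, None, None)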
378 381 @propertycache
379 382 def _tagscache(self):
380 383 '''Returns a tagscache object that contains various tags related
381 384 caches.'''
382 385
383 386 # This simplifies its cache management by having one decorated
384 387 # function (this one) and the rest simply fetch things from it.
385 388 class tagscache(object):
386 389 def __init__(self):
387 390 # These two define the set of tags for this repository. tags
388 391 # maps tag name to node; tagtypes maps tag name to 'global' or
389 392 # 'local'. (Global tags are defined by .hgtags across all
390 393 # heads, and local tags are defined in .hg/localtags.)
391 394 # They constitute the in-memory cache of tags.
392 395 self.tags = self.tagtypes = None
393 396
394 397 self.nodetagscache = self.tagslist = None
395 398
396 399 cache = tagscache()
397 400 cache.tags, cache.tagtypes = self._findtags()
398 401
399 402 return cache
400 403
401 404 def tags(self):
402 405 '''return a mapping of tag to node'''
403 406 t = {}
404 407 for k, v in self._tagscache.tags.iteritems():
405 408 try:
406 409 # ignore tags to unknown nodes
407 410 self.changelog.rev(v)
408 411 t[k] = v
409 412 except (error.LookupError, ValueError):
410 413 pass
411 414 return t
412 415
413 416 def _findtags(self):
414 417 '''Do the hard work of finding tags. Return a pair of dicts
415 418 (tags, tagtypes) where tags maps tag name to node, and tagtypes
416 419 maps tag name to a string like \'global\' or \'local\'.
417 420 Subclasses or extensions are free to add their own tags, but
418 421 should be aware that the returned dicts will be retained for the
419 422 duration of the localrepo object.'''
420 423
421 424 # XXX what tagtype should subclasses/extensions use? Currently
422 425 # mq and bookmarks add tags, but do not set the tagtype at all.
423 426 # Should each extension invent its own tag type? Should there
424 427 # be one tagtype for all such "virtual" tags? Or is the status
425 428 # quo fine?
426 429
427 430 alltags = {} # map tag name to (node, hist)
428 431 tagtypes = {}
429 432
430 433 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
431 434 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
432 435
433 436 # Build the return dicts. Have to re-encode tag names because
434 437 # the tags module always uses UTF-8 (in order not to lose info
435 438 # writing to the cache), but the rest of Mercurial wants them in
436 439 # local encoding.
437 440 tags = {}
438 441 for (name, (node, hist)) in alltags.iteritems():
439 442 if node != nullid:
440 443 tags[encoding.tolocal(name)] = node
441 444 tags['tip'] = self.changelog.tip()
442 445 tagtypes = dict([(encoding.tolocal(name), value)
443 446 for (name, value) in tagtypes.iteritems()])
444 447 return (tags, tagtypes)
445 448
446 449 def tagtype(self, tagname):
447 450 '''
448 451 return the type of the given tag. result can be:
449 452
450 453 'local' : a local tag
451 454 'global' : a global tag
452 455 None : tag does not exist
453 456 '''
454 457
455 458 return self._tagscache.tagtypes.get(tagname)
456 459
457 460 def tagslist(self):
458 461 '''return a list of tags ordered by revision'''
459 462 if not self._tagscache.tagslist:
460 463 l = []
461 464 for t, n in self.tags().iteritems():
462 465 r = self.changelog.rev(n)
463 466 l.append((r, t, n))
464 467 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
465 468
466 469 return self._tagscache.tagslist
467 470
468 471 def nodetags(self, node):
469 472 '''return the tags associated with a node'''
470 473 if not self._tagscache.nodetagscache:
471 474 nodetagscache = {}
472 475 for t, n in self._tagscache.tags.iteritems():
473 476 nodetagscache.setdefault(n, []).append(t)
474 477 for tags in nodetagscache.itervalues():
475 478 tags.sort()
476 479 self._tagscache.nodetagscache = nodetagscache
477 480 return self._tagscache.nodetagscache.get(node, [])
478 481
479 482 def nodebookmarks(self, node):
480 483 marks = []
481 484 for bookmark, n in self._bookmarks.iteritems():
482 485 if n == node:
483 486 marks.append(bookmark)
484 487 return sorted(marks)
485 488
486 489 def _branchtags(self, partial, lrev):
487 490 # TODO: rename this function?
488 491 tiprev = len(self) - 1
489 492 if lrev != tiprev:
490 493 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
491 494 self._updatebranchcache(partial, ctxgen)
492 495 self._writebranchcache(partial, self.changelog.tip(), tiprev)
493 496
494 497 return partial
495 498
496 499 def updatebranchcache(self):
497 500 tip = self.changelog.tip()
498 501 if self._branchcache is not None and self._branchcachetip == tip:
499 502 return
500 503
501 504 oldtip = self._branchcachetip
502 505 self._branchcachetip = tip
503 506 if oldtip is None or oldtip not in self.changelog.nodemap:
504 507 partial, last, lrev = self._readbranchcache()
505 508 else:
506 509 lrev = self.changelog.rev(oldtip)
507 510 partial = self._branchcache
508 511
509 512 self._branchtags(partial, lrev)
510 513 # this private cache holds all heads (not just the branch tips)
511 514 self._branchcache = partial
512 515
513 516 def branchmap(self):
514 517 '''returns a dictionary {branch: [branchheads]}'''
515 518 self.updatebranchcache()
516 519 return self._branchcache
517 520
518 521 def _branchtip(self, heads):
519 522 '''return the tipmost branch head in heads'''
520 523 tip = heads[-1]
521 524 for h in reversed(heads):
522 525 if not self[h].closesbranch():
523 526 tip = h
524 527 break
525 528 return tip
526 529
527 530 def branchtip(self, branch):
528 531 '''return the tip node for a given branch'''
529 532 if branch not in self.branchmap():
530 533 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
531 534 return self._branchtip(self.branchmap()[branch])
532 535
533 536 def branchtags(self):
534 537 '''return a dict where branch names map to the tipmost head of
535 538 the branch, open heads come before closed'''
536 539 bt = {}
537 540 for bn, heads in self.branchmap().iteritems():
538 541 bt[bn] = self._branchtip(heads)
539 542 return bt
540 543
541 544 def _readbranchcache(self):
542 545 partial = {}
543 546 try:
544 547 f = self.opener("cache/branchheads")
545 548 lines = f.read().split('\n')
546 549 f.close()
547 550 except (IOError, OSError):
548 551 return {}, nullid, nullrev
549 552
550 553 try:
551 554 last, lrev = lines.pop(0).split(" ", 1)
552 555 last, lrev = bin(last), int(lrev)
553 556 if lrev >= len(self) or self[lrev].node() != last:
554 557 # invalidate the cache
555 558 raise ValueError('invalidating branch cache (tip differs)')
556 559 for l in lines:
557 560 if not l:
558 561 continue
559 562 node, label = l.split(" ", 1)
560 563 label = encoding.tolocal(label.strip())
561 564 if not node in self:
562 565 raise ValueError('invalidating branch cache because node '+
563 566 '%s does not exist' % node)
564 567 partial.setdefault(label, []).append(bin(node))
565 568 except KeyboardInterrupt:
566 569 raise
567 570 except Exception, inst:
568 571 if self.ui.debugflag:
569 572 self.ui.warn(str(inst), '\n')
570 573 partial, last, lrev = {}, nullid, nullrev
571 574 return partial, last, lrev
572 575
573 576 def _writebranchcache(self, branches, tip, tiprev):
574 577 try:
575 578 f = self.opener("cache/branchheads", "w", atomictemp=True)
576 579 f.write("%s %s\n" % (hex(tip), tiprev))
577 580 for label, nodes in branches.iteritems():
578 581 for node in nodes:
579 582 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
580 583 f.close()
581 584 except (IOError, OSError):
582 585 pass
583 586
584 587 def _updatebranchcache(self, partial, ctxgen):
585 588 """Given a branchhead cache, partial, that may have extra nodes or be
586 589 missing heads, and a generator of nodes that are at least a superset of
587 590 the missing heads, this function updates partial to be correct.
588 591 """
589 592 # collect new branch entries
590 593 newbranches = {}
591 594 for c in ctxgen:
592 595 newbranches.setdefault(c.branch(), []).append(c.node())
593 596 # if older branchheads are reachable from new ones, they aren't
594 597 # really branchheads. Note checking parents is insufficient:
595 598 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
596 599 for branch, newnodes in newbranches.iteritems():
597 600 bheads = partial.setdefault(branch, [])
598 601 # Remove candidate heads that are no longer in the repo (e.g., as
599 602 # the result of a strip that just happened). Avoid using 'node in
600 603 # self' here because that dives down into branchcache code somewhat
601 604 # recursively.
602 605 bheadrevs = [self.changelog.rev(node) for node in bheads
603 606 if self.changelog.hasnode(node)]
604 607 newheadrevs = [self.changelog.rev(node) for node in newnodes
605 608 if self.changelog.hasnode(node)]
606 609 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
607 610 # Remove duplicates - nodes that are in newheadrevs and are already
608 611 # in bheadrevs. This can happen if you strip a node whose parent
609 612 # was already a head (because they're on different branches).
610 613 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
611 614
612 615 # Starting from tip means fewer passes over reachable. If we know
613 616 # the new candidates are not ancestors of existing heads, we don't
614 617 # have to examine ancestors of existing heads
615 618 if ctxisnew:
616 619 iterrevs = sorted(newheadrevs)
617 620 else:
618 621 iterrevs = list(bheadrevs)
619 622
620 623 # This loop prunes out two kinds of heads - heads that are
621 624 # superseded by a head in newheadrevs, and newheadrevs that are not
622 625 # heads because an existing head is their descendant.
623 626 while iterrevs:
624 627 latest = iterrevs.pop()
625 628 if latest not in bheadrevs:
626 629 continue
627 630 ancestors = set(self.changelog.ancestors([latest],
628 631 bheadrevs[0]))
629 632 if ancestors:
630 633 bheadrevs = [b for b in bheadrevs if b not in ancestors]
631 634 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
632 635
633 636 # There may be branches that cease to exist when the last commit in the
634 637 # branch was stripped. This code filters them out. Note that the
635 638 # branch that ceased to exist may not be in newbranches because
636 639 # newbranches is the set of candidate heads, which, when you strip the
637 640 # last commit in a branch, will be the parent branch.
638 641 for branch in partial:
639 642 nodes = [head for head in partial[branch]
640 643 if self.changelog.hasnode(head)]
641 644 if not nodes:
642 645 del partial[branch]
643 646
644 647 def lookup(self, key):
645 648 return self[key].node()
646 649
647 650 def lookupbranch(self, key, remote=None):
648 651 repo = remote or self
649 652 if key in repo.branchmap():
650 653 return key
651 654
652 655 repo = (remote and remote.local()) and remote or self
653 656 return repo[key].branch()
654 657
655 658 def known(self, nodes):
656 659 nm = self.changelog.nodemap
657 660 pc = self._phasecache
658 661 result = []
659 662 for n in nodes:
660 663 r = nm.get(n)
661 664 resp = not (r is None or pc.phase(self, r) >= phases.secret)
662 665 result.append(resp)
663 666 return result
664 667
665 668 def local(self):
666 669 return self
667 670
668 671 def join(self, f):
669 672 return os.path.join(self.path, f)
670 673
671 674 def wjoin(self, f):
672 675 return os.path.join(self.root, f)
673 676
674 677 def file(self, f):
675 678 if f[0] == '/':
676 679 f = f[1:]
677 680 return filelog.filelog(self.sopener, f)
678 681
679 682 def changectx(self, changeid):
680 683 return self[changeid]
681 684
682 685 def parents(self, changeid=None):
683 686 '''get list of changectxs for parents of changeid'''
684 687 return self[changeid].parents()
685 688
686 689 def setparents(self, p1, p2=nullid):
687 690 copies = self.dirstate.setparents(p1, p2)
688 691 if copies:
689 692 # Adjust copy records, the dirstate cannot do it, it
690 693 # requires access to parents manifests. Preserve them
691 694 # only for entries added to first parent.
692 695 pctx = self[p1]
693 696 for f in copies:
694 697 if f not in pctx and copies[f] in pctx:
695 698 self.dirstate.copy(copies[f], f)
696 699
697 700 def filectx(self, path, changeid=None, fileid=None):
698 701 """changeid can be a changeset revision, node, or tag.
699 702 fileid can be a file revision or node."""
700 703 return context.filectx(self, path, changeid, fileid)
701 704
702 705 def getcwd(self):
703 706 return self.dirstate.getcwd()
704 707
705 708 def pathto(self, f, cwd=None):
706 709 return self.dirstate.pathto(f, cwd)
707 710
708 711 def wfile(self, f, mode='r'):
709 712 return self.wopener(f, mode)
710 713
711 714 def _link(self, f):
712 715 return os.path.islink(self.wjoin(f))
713 716
714 717 def _loadfilter(self, filter):
715 718 if filter not in self.filterpats:
716 719 l = []
717 720 for pat, cmd in self.ui.configitems(filter):
718 721 if cmd == '!':
719 722 continue
720 723 mf = matchmod.match(self.root, '', [pat])
721 724 fn = None
722 725 params = cmd
723 726 for name, filterfn in self._datafilters.iteritems():
724 727 if cmd.startswith(name):
725 728 fn = filterfn
726 729 params = cmd[len(name):].lstrip()
727 730 break
728 731 if not fn:
729 732 fn = lambda s, c, **kwargs: util.filter(s, c)
730 733 # Wrap old filters not supporting keyword arguments
731 734 if not inspect.getargspec(fn)[2]:
732 735 oldfn = fn
733 736 fn = lambda s, c, **kwargs: oldfn(s, c)
734 737 l.append((mf, fn, params))
735 738 self.filterpats[filter] = l
736 739 return self.filterpats[filter]
737 740
738 741 def _filter(self, filterpats, filename, data):
739 742 for mf, fn, cmd in filterpats:
740 743 if mf(filename):
741 744 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
742 745 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
743 746 break
744 747
745 748 return data
746 749
747 750 @propertycache
748 751 def _encodefilterpats(self):
749 752 return self._loadfilter('encode')
750 753
751 754 @propertycache
752 755 def _decodefilterpats(self):
753 756 return self._loadfilter('decode')
754 757
755 758 def adddatafilter(self, name, filter):
756 759 self._datafilters[name] = filter
757 760
758 761 def wread(self, filename):
759 762 if self._link(filename):
760 763 data = os.readlink(self.wjoin(filename))
761 764 else:
762 765 data = self.wopener.read(filename)
763 766 return self._filter(self._encodefilterpats, filename, data)
764 767
765 768 def wwrite(self, filename, data, flags):
766 769 data = self._filter(self._decodefilterpats, filename, data)
767 770 if 'l' in flags:
768 771 self.wopener.symlink(data, filename)
769 772 else:
770 773 self.wopener.write(filename, data)
771 774 if 'x' in flags:
772 775 util.setflags(self.wjoin(filename), False, True)
773 776
774 777 def wwritedata(self, filename, data):
775 778 return self._filter(self._decodefilterpats, filename, data)
776 779
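# Sketch of the encode/decode filter machinery above (illustrative only;
# the filter name and pattern are hypothetical): an extension registers a
# named data filter, and any [encode]/[decode] hgrc entry whose command
# starts with that name is routed to it by _loadfilter():
#
#     def tolf(s, cmd, **kwargs):
#         return s.replace('\r\n', '\n')
#     repo.adddatafilter('tolf:', tolf)
#     # hgrc:  [decode]
#     #        **.txt = tolf: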
777 780 def transaction(self, desc):
778 781 tr = self._transref and self._transref() or None
779 782 if tr and tr.running():
780 783 return tr.nest()
781 784
782 785 # abort here if the journal already exists
783 786 if os.path.exists(self.sjoin("journal")):
784 787 raise error.RepoError(
785 788 _("abandoned transaction found - run hg recover"))
786 789
787 790 self._writejournal(desc)
788 791 renames = [(x, undoname(x)) for x in self._journalfiles()]
789 792
790 793 tr = transaction.transaction(self.ui.warn, self.sopener,
791 794 self.sjoin("journal"),
792 795 aftertrans(renames),
793 796 self.store.createmode)
794 797 self._transref = weakref.ref(tr)
795 798 return tr
796 799
797 800 def _journalfiles(self):
798 801 return (self.sjoin('journal'), self.join('journal.dirstate'),
799 802 self.join('journal.branch'), self.join('journal.desc'),
800 803 self.join('journal.bookmarks'),
801 804 self.sjoin('journal.phaseroots'))
802 805
803 806 def undofiles(self):
804 807 return [undoname(x) for x in self._journalfiles()]
805 808
806 809 def _writejournal(self, desc):
807 810 self.opener.write("journal.dirstate",
808 811 self.opener.tryread("dirstate"))
809 812 self.opener.write("journal.branch",
810 813 encoding.fromlocal(self.dirstate.branch()))
811 814 self.opener.write("journal.desc",
812 815 "%d\n%s\n" % (len(self), desc))
813 816 self.opener.write("journal.bookmarks",
814 817 self.opener.tryread("bookmarks"))
815 818 self.sopener.write("journal.phaseroots",
816 819 self.sopener.tryread("phaseroots"))
817 820
818 821 def recover(self):
819 822 lock = self.lock()
820 823 try:
821 824 if os.path.exists(self.sjoin("journal")):
822 825 self.ui.status(_("rolling back interrupted transaction\n"))
823 826 transaction.rollback(self.sopener, self.sjoin("journal"),
824 827 self.ui.warn)
825 828 self.invalidate()
826 829 return True
827 830 else:
828 831 self.ui.warn(_("no interrupted transaction available\n"))
829 832 return False
830 833 finally:
831 834 lock.release()
832 835
833 836 def rollback(self, dryrun=False, force=False):
834 837 wlock = lock = None
835 838 try:
836 839 wlock = self.wlock()
837 840 lock = self.lock()
838 841 if os.path.exists(self.sjoin("undo")):
839 842 return self._rollback(dryrun, force)
840 843 else:
841 844 self.ui.warn(_("no rollback information available\n"))
842 845 return 1
843 846 finally:
844 847 release(lock, wlock)
845 848
846 849 def _rollback(self, dryrun, force):
847 850 ui = self.ui
848 851 try:
849 852 args = self.opener.read('undo.desc').splitlines()
850 853 (oldlen, desc, detail) = (int(args[0]), args[1], None)
851 854 if len(args) >= 3:
852 855 detail = args[2]
853 856 oldtip = oldlen - 1
854 857
855 858 if detail and ui.verbose:
856 859 msg = (_('repository tip rolled back to revision %s'
857 860 ' (undo %s: %s)\n')
858 861 % (oldtip, desc, detail))
859 862 else:
860 863 msg = (_('repository tip rolled back to revision %s'
861 864 ' (undo %s)\n')
862 865 % (oldtip, desc))
863 866 except IOError:
864 867 msg = _('rolling back unknown transaction\n')
865 868 desc = None
866 869
867 870 if not force and self['.'] != self['tip'] and desc == 'commit':
868 871 raise util.Abort(
869 872 _('rollback of last commit while not checked out '
870 873 'may lose data'), hint=_('use -f to force'))
871 874
872 875 ui.status(msg)
873 876 if dryrun:
874 877 return 0
875 878
876 879 parents = self.dirstate.parents()
877 880 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
878 881 if os.path.exists(self.join('undo.bookmarks')):
879 882 util.rename(self.join('undo.bookmarks'),
880 883 self.join('bookmarks'))
881 884 if os.path.exists(self.sjoin('undo.phaseroots')):
882 885 util.rename(self.sjoin('undo.phaseroots'),
883 886 self.sjoin('phaseroots'))
884 887 self.invalidate()
885 888
886 889 parentgone = (parents[0] not in self.changelog.nodemap or
887 890 parents[1] not in self.changelog.nodemap)
888 891 if parentgone:
889 892 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
890 893 try:
891 894 branch = self.opener.read('undo.branch')
892 895 self.dirstate.setbranch(branch)
893 896 except IOError:
894 897 ui.warn(_('named branch could not be reset: '
895 898 'current branch is still \'%s\'\n')
896 899 % self.dirstate.branch())
897 900
898 901 self.dirstate.invalidate()
899 902 parents = tuple([p.rev() for p in self.parents()])
900 903 if len(parents) > 1:
901 904 ui.status(_('working directory now based on '
902 905 'revisions %d and %d\n') % parents)
903 906 else:
904 907 ui.status(_('working directory now based on '
905 908 'revision %d\n') % parents)
906 909 # TODO: if we know which new heads may result from this rollback, pass
907 910 # them to destroy(), which will prevent the branchhead cache from being
908 911 # invalidated.
909 912 self.destroyed()
910 913 return 0
911 914
912 915 def invalidatecaches(self):
913 916 def delcache(name):
914 917 try:
915 918 delattr(self, name)
916 919 except AttributeError:
917 920 pass
918 921
919 922 delcache('_tagscache')
920 923
921 924 self._branchcache = None # in UTF-8
922 925 self._branchcachetip = None
923 926
924 927 def invalidatedirstate(self):
925 928 '''Invalidates the dirstate, causing the next call to dirstate
926 929 to check if it was modified since the last time it was read,
927 930 rereading it if it has.
928 931
929 932 This is different from dirstate.invalidate() in that it doesn't always
930 933 reread the dirstate. Use dirstate.invalidate() if you want to
931 934 explicitly read the dirstate again (i.e. restoring it to a previous
932 935 known good state).'''
933 936 if 'dirstate' in self.__dict__:
934 937 for k in self.dirstate._filecache:
935 938 try:
936 939 delattr(self.dirstate, k)
937 940 except AttributeError:
938 941 pass
939 942 delattr(self, 'dirstate')
940 943
941 944 def invalidate(self):
942 945 for k in self._filecache:
943 946 # dirstate is invalidated separately in invalidatedirstate()
944 947 if k == 'dirstate':
945 948 continue
946 949
947 950 try:
948 951 delattr(self, k)
949 952 except AttributeError:
950 953 pass
951 954 self.invalidatecaches()
952 955
953 956 # Discard all cache entries to force reloading everything.
954 957 self._filecache.clear()
955 958
956 959 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
957 960 try:
958 961 l = lock.lock(lockname, 0, releasefn, desc=desc)
959 962 except error.LockHeld, inst:
960 963 if not wait:
961 964 raise
962 965 self.ui.warn(_("waiting for lock on %s held by %r\n") %
963 966 (desc, inst.locker))
964 967 # default to 600 seconds timeout
965 968 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
966 969 releasefn, desc=desc)
967 970 if acquirefn:
968 971 acquirefn()
969 972 return l
970 973
971 974 def _afterlock(self, callback):
972 975 """add a callback to the current repository lock.
973 976
974 977 The callback will be executed on lock release."""
975 978 l = self._lockref and self._lockref()
976 979 if l:
977 980 l.postrelease.append(callback)
978 981 else:
979 982 callback()
980 983
981 984 def lock(self, wait=True):
982 985 '''Lock the repository store (.hg/store) and return a weak reference
983 986 to the lock. Use this before modifying the store (e.g. committing or
984 987 stripping). If you are opening a transaction, get a lock as well.'''
985 988 l = self._lockref and self._lockref()
986 989 if l is not None and l.held:
987 990 l.lock()
988 991 return l
989 992
990 993 def unlock():
991 994 self.store.write()
992 995 if '_phasecache' in vars(self):
993 996 self._phasecache.write()
994 997 for k, ce in self._filecache.items():
995 998 if k == 'dirstate':
996 999 continue
997 1000 ce.refresh()
998 1001
999 1002 l = self._lock(self.sjoin("lock"), wait, unlock,
1000 1003 self.invalidate, _('repository %s') % self.origroot)
1001 1004 self._lockref = weakref.ref(l)
1002 1005 return l
1003 1006
1004 1007 def wlock(self, wait=True):
1005 1008 '''Lock the non-store parts of the repository (everything under
1006 1009 .hg except .hg/store) and return a weak reference to the lock.
1007 1010 Use this before modifying files in .hg.'''
1008 1011 l = self._wlockref and self._wlockref()
1009 1012 if l is not None and l.held:
1010 1013 l.lock()
1011 1014 return l
1012 1015
1013 1016 def unlock():
1014 1017 self.dirstate.write()
1015 1018 ce = self._filecache.get('dirstate')
1016 1019 if ce:
1017 1020 ce.refresh()
1018 1021
1019 1022 l = self._lock(self.join("wlock"), wait, unlock,
1020 1023 self.invalidatedirstate, _('working directory of %s') %
1021 1024 self.origroot)
1022 1025 self._wlockref = weakref.ref(l)
1023 1026 return l
1024 1027
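# Lock-ordering sketch (as practiced by rollback() above; illustrative
# only): when both locks are needed, take wlock before lock and release
# them in reverse order:
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         ...  # modify the store and the working copy
#     finally:
#         release(lock, wlock)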
1025 1028 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1026 1029 """
1027 1030 commit an individual file as part of a larger transaction
1028 1031 """
1029 1032
1030 1033 fname = fctx.path()
1031 1034 text = fctx.data()
1032 1035 flog = self.file(fname)
1033 1036 fparent1 = manifest1.get(fname, nullid)
1034 1037 fparent2 = fparent2o = manifest2.get(fname, nullid)
1035 1038
1036 1039 meta = {}
1037 1040 copy = fctx.renamed()
1038 1041 if copy and copy[0] != fname:
1039 1042 # Mark the new revision of this file as a copy of another
1040 1043 # file. This copy data will effectively act as a parent
1041 1044 # of this new revision. If this is a merge, the first
1042 1045 # parent will be the nullid (meaning "look up the copy data")
1043 1046 # and the second one will be the other parent. For example:
1044 1047 #
1045 1048 # 0 --- 1 --- 3 rev1 changes file foo
1046 1049 # \ / rev2 renames foo to bar and changes it
1047 1050 # \- 2 -/ rev3 should have bar with all changes and
1048 1051 # should record that bar descends from
1049 1052 # bar in rev2 and foo in rev1
1050 1053 #
1051 1054 # this allows this merge to succeed:
1052 1055 #
1053 1056 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1054 1057 # \ / merging rev3 and rev4 should use bar@rev2
1055 1058 # \- 2 --- 4 as the merge base
1056 1059 #
1057 1060
1058 1061 cfname = copy[0]
1059 1062 crev = manifest1.get(cfname)
1060 1063 newfparent = fparent2
1061 1064
1062 1065 if manifest2: # branch merge
1063 1066 if fparent2 == nullid or crev is None: # copied on remote side
1064 1067 if cfname in manifest2:
1065 1068 crev = manifest2[cfname]
1066 1069 newfparent = fparent1
1067 1070
1068 1071 # find source in nearest ancestor if we've lost track
1069 1072 if not crev:
1070 1073 self.ui.debug(" %s: searching for copy revision for %s\n" %
1071 1074 (fname, cfname))
1072 1075 for ancestor in self[None].ancestors():
1073 1076 if cfname in ancestor:
1074 1077 crev = ancestor[cfname].filenode()
1075 1078 break
1076 1079
1077 1080 if crev:
1078 1081 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1079 1082 meta["copy"] = cfname
1080 1083 meta["copyrev"] = hex(crev)
1081 1084 fparent1, fparent2 = nullid, newfparent
1082 1085 else:
1083 1086 self.ui.warn(_("warning: can't find ancestor for '%s' "
1084 1087 "copied from '%s'!\n") % (fname, cfname))
1085 1088
1086 1089 elif fparent2 != nullid:
1087 1090 # is one parent an ancestor of the other?
1088 1091 fparentancestor = flog.ancestor(fparent1, fparent2)
1089 1092 if fparentancestor == fparent1:
1090 1093 fparent1, fparent2 = fparent2, nullid
1091 1094 elif fparentancestor == fparent2:
1092 1095 fparent2 = nullid
1093 1096
1094 1097 # is the file changed?
1095 1098 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1096 1099 changelist.append(fname)
1097 1100 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1098 1101
1099 1102 # are just the flags changed during merge?
1100 1103 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1101 1104 changelist.append(fname)
1102 1105
1103 1106 return fparent1
1104 1107
1105 1108 def commit(self, text="", user=None, date=None, match=None, force=False,
1106 1109 editor=False, extra={}):
1107 1110 """Add a new revision to current repository.
1108 1111
1109 1112 Revision information is gathered from the working directory,
1110 1113 match can be used to filter the committed files. If editor is
1111 1114 supplied, it is called to get a commit message.
1112 1115 """
1113 1116
1114 1117 def fail(f, msg):
1115 1118 raise util.Abort('%s: %s' % (f, msg))
1116 1119
1117 1120 if not match:
1118 1121 match = matchmod.always(self.root, '')
1119 1122
1120 1123 if not force:
1121 1124 vdirs = []
1122 1125 match.dir = vdirs.append
1123 1126 match.bad = fail
1124 1127
1125 1128 wlock = self.wlock()
1126 1129 try:
1127 1130 wctx = self[None]
1128 1131 merge = len(wctx.parents()) > 1
1129 1132
1130 1133 if (not force and merge and match and
1131 1134 (match.files() or match.anypats())):
1132 1135 raise util.Abort(_('cannot partially commit a merge '
1133 1136 '(do not specify files or patterns)'))
1134 1137
1135 1138 changes = self.status(match=match, clean=force)
1136 1139 if force:
1137 1140 changes[0].extend(changes[6]) # mq may commit unchanged files
1138 1141
1139 1142 # check subrepos
1140 1143 subs = []
1141 1144 commitsubs = set()
1142 1145 newstate = wctx.substate.copy()
1143 1146 # only manage subrepos and .hgsubstate if .hgsub is present
1144 1147 if '.hgsub' in wctx:
1145 1148 # we'll decide whether to track this ourselves, thanks
1146 1149 if '.hgsubstate' in changes[0]:
1147 1150 changes[0].remove('.hgsubstate')
1148 1151 if '.hgsubstate' in changes[2]:
1149 1152 changes[2].remove('.hgsubstate')
1150 1153
1151 1154 # compare current state to last committed state
1152 1155 # build new substate based on last committed state
1153 1156 oldstate = wctx.p1().substate
1154 1157 for s in sorted(newstate.keys()):
1155 1158 if not match(s):
1156 1159 # ignore working copy, use old state if present
1157 1160 if s in oldstate:
1158 1161 newstate[s] = oldstate[s]
1159 1162 continue
1160 1163 if not force:
1161 1164 raise util.Abort(
1162 1165 _("commit with new subrepo %s excluded") % s)
1163 1166 if wctx.sub(s).dirty(True):
1164 1167 if not self.ui.configbool('ui', 'commitsubrepos'):
1165 1168 raise util.Abort(
1166 1169 _("uncommitted changes in subrepo %s") % s,
1167 1170 hint=_("use --subrepos for recursive commit"))
1168 1171 subs.append(s)
1169 1172 commitsubs.add(s)
1170 1173 else:
1171 1174 bs = wctx.sub(s).basestate()
1172 1175 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1173 1176 if oldstate.get(s, (None, None, None))[1] != bs:
1174 1177 subs.append(s)
1175 1178
1176 1179 # check for removed subrepos
1177 1180 for p in wctx.parents():
1178 1181 r = [s for s in p.substate if s not in newstate]
1179 1182 subs += [s for s in r if match(s)]
1180 1183 if subs:
1181 1184 if (not match('.hgsub') and
1182 1185 '.hgsub' in (wctx.modified() + wctx.added())):
1183 1186 raise util.Abort(
1184 1187 _("can't commit subrepos without .hgsub"))
1185 1188 changes[0].insert(0, '.hgsubstate')
1186 1189
1187 1190 elif '.hgsub' in changes[2]:
1188 1191 # clean up .hgsubstate when .hgsub is removed
1189 1192 if ('.hgsubstate' in wctx and
1190 1193 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1191 1194 changes[2].insert(0, '.hgsubstate')
1192 1195
1193 1196 # make sure all explicit patterns are matched
1194 1197 if not force and match.files():
1195 1198 matched = set(changes[0] + changes[1] + changes[2])
1196 1199
1197 1200 for f in match.files():
1198 1201 if f == '.' or f in matched or f in wctx.substate:
1199 1202 continue
1200 1203 if f in changes[3]: # missing
1201 1204 fail(f, _('file not found!'))
1202 1205 if f in vdirs: # visited directory
1203 1206 d = f + '/'
1204 1207 for mf in matched:
1205 1208 if mf.startswith(d):
1206 1209 break
1207 1210 else:
1208 1211 fail(f, _("no match under directory!"))
1209 1212 elif f not in self.dirstate:
1210 1213 fail(f, _("file not tracked!"))
1211 1214
1212 1215 if (not force and not extra.get("close") and not merge
1213 1216 and not (changes[0] or changes[1] or changes[2])
1214 1217 and wctx.branch() == wctx.p1().branch()):
1215 1218 return None
1216 1219
1217 1220 if merge and changes[3]:
1218 1221 raise util.Abort(_("cannot commit merge with missing files"))
1219 1222
1220 1223 ms = mergemod.mergestate(self)
1221 1224 for f in changes[0]:
1222 1225 if f in ms and ms[f] == 'u':
1223 1226 raise util.Abort(_("unresolved merge conflicts "
1224 1227 "(see hg help resolve)"))
1225 1228
1226 1229 cctx = context.workingctx(self, text, user, date, extra, changes)
1227 1230 if editor:
1228 1231 cctx._text = editor(self, cctx, subs)
1229 1232 edited = (text != cctx._text)
1230 1233
1231 1234 # commit subs and write new state
1232 1235 if subs:
1233 1236 for s in sorted(commitsubs):
1234 1237 sub = wctx.sub(s)
1235 1238 self.ui.status(_('committing subrepository %s\n') %
1236 1239 subrepo.subrelpath(sub))
1237 1240 sr = sub.commit(cctx._text, user, date)
1238 1241 newstate[s] = (newstate[s][0], sr)
1239 1242 subrepo.writestate(self, newstate)
1240 1243
1241 1244 # Save commit message in case this transaction gets rolled back
1242 1245 # (e.g. by a pretxncommit hook). Leave the content alone on
1243 1246 # the assumption that the user will use the same editor again.
1244 1247 msgfn = self.savecommitmessage(cctx._text)
1245 1248
1246 1249 p1, p2 = self.dirstate.parents()
1247 1250 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1248 1251 try:
1249 1252 self.hook("precommit", throw=True, parent1=hookp1,
1250 1253 parent2=hookp2)
1251 1254 ret = self.commitctx(cctx, True)
1252 1255 except: # re-raises
1253 1256 if edited:
1254 1257 self.ui.write(
1255 1258 _('note: commit message saved in %s\n') % msgfn)
1256 1259 raise
1257 1260
1258 1261 # update bookmarks, dirstate and mergestate
1259 1262 bookmarks.update(self, [p1, p2], ret)
1260 1263 for f in changes[0] + changes[1]:
1261 1264 self.dirstate.normal(f)
1262 1265 for f in changes[2]:
1263 1266 self.dirstate.drop(f)
1264 1267 self.dirstate.setparents(ret)
1265 1268 ms.reset()
1266 1269 finally:
1267 1270 wlock.release()
1268 1271
1269 1272 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1270 1273 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1271 1274 self._afterlock(commithook)
1272 1275 return ret
1273 1276
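# Minimal caller sketch for commit() above (illustrative only; a None
# return means there was nothing to commit):
#
#     node = repo.commit(text='fix a bug', user=None, date=None)
#     if node is None:
#         ui.status('nothing changed\n')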
1274 1277 def commitctx(self, ctx, error=False):
1275 1278 """Add a new revision to current repository.
1276 1279 Revision information is passed via the context argument.
1277 1280 """
1278 1281
1279 1282 tr = lock = None
1280 1283 removed = list(ctx.removed())
1281 1284 p1, p2 = ctx.p1(), ctx.p2()
1282 1285 user = ctx.user()
1283 1286
1284 1287 lock = self.lock()
1285 1288 try:
1286 1289 tr = self.transaction("commit")
1287 1290 trp = weakref.proxy(tr)
1288 1291
1289 1292 if ctx.files():
1290 1293 m1 = p1.manifest().copy()
1291 1294 m2 = p2.manifest()
1292 1295
1293 1296 # check in files
1294 1297 new = {}
1295 1298 changed = []
1296 1299 linkrev = len(self)
1297 1300 for f in sorted(ctx.modified() + ctx.added()):
1298 1301 self.ui.note(f + "\n")
1299 1302 try:
1300 1303 fctx = ctx[f]
1301 1304 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1302 1305 changed)
1303 1306 m1.set(f, fctx.flags())
1304 1307 except OSError, inst:
1305 1308 self.ui.warn(_("trouble committing %s!\n") % f)
1306 1309 raise
1307 1310 except IOError, inst:
1308 1311 errcode = getattr(inst, 'errno', errno.ENOENT)
1309 1312 if error or errcode and errcode != errno.ENOENT:
1310 1313 self.ui.warn(_("trouble committing %s!\n") % f)
1311 1314 raise
1312 1315 else:
1313 1316 removed.append(f)
1314 1317
1315 1318 # update manifest
1316 1319 m1.update(new)
1317 1320 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1318 1321 drop = [f for f in removed if f in m1]
1319 1322 for f in drop:
1320 1323 del m1[f]
1321 1324 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1322 1325 p2.manifestnode(), (new, drop))
1323 1326 files = changed + removed
1324 1327 else:
1325 1328 mn = p1.manifestnode()
1326 1329 files = []
1327 1330
1328 1331 # update changelog
1329 1332 self.changelog.delayupdate()
1330 1333 n = self.changelog.add(mn, files, ctx.description(),
1331 1334 trp, p1.node(), p2.node(),
1332 1335 user, ctx.date(), ctx.extra().copy())
1333 1336 p = lambda: self.changelog.writepending() and self.root or ""
1334 1337 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1335 1338 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1336 1339 parent2=xp2, pending=p)
1337 1340 self.changelog.finalize(trp)
1338 1341 # set the new commit in its proper phase
1339 1342 targetphase = phases.newcommitphase(self.ui)
1340 1343 if targetphase:
1341 1344 # retracting the boundary does not alter parent changesets.
1342 1345 # if a parent has a higher phase, the resulting phase will
1343 1346 # be compliant anyway
1344 1347 #
1345 1348 # if minimal phase was 0 we don't need to retract anything
1346 1349 phases.retractboundary(self, targetphase, [n])
1347 1350 tr.close()
1348 1351 self.updatebranchcache()
1349 1352 return n
1350 1353 finally:
1351 1354 if tr:
1352 1355 tr.release()
1353 1356 lock.release()
1354 1357
1355 1358 def destroyed(self, newheadnodes=None):
1356 1359 '''Inform the repository that nodes have been destroyed.
1357 1360 Intended for use by strip and rollback, so there's a common
1358 1361 place for anything that has to be done after destroying history.
1359 1362
1360 1363 If you know the branchheadcache was up to date before nodes were removed
1361 1364 and you also know the set of candidate new heads that may have resulted
1362 1365 from the destruction, you can set newheadnodes. This will enable the
1363 1366 code to update the branchheads cache, rather than having future code
1364 1367 decide it's invalid and regenerate it from scratch.
1365 1368 '''
1366 1369 # If we have info, newheadnodes, on how to update the branch cache, do
1367 1370 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1368 1371 # will be caught the next time it is read.
1369 1372 if newheadnodes:
1370 1373 tiprev = len(self) - 1
1371 1374 ctxgen = (self[node] for node in newheadnodes
1372 1375 if self.changelog.hasnode(node))
1373 1376 self._updatebranchcache(self._branchcache, ctxgen)
1374 1377 self._writebranchcache(self._branchcache, self.changelog.tip(),
1375 1378 tiprev)
1376 1379
1377 1380 # Ensure the persistent tag cache is updated. Doing it now
1378 1381 # means that the tag cache only has to worry about destroyed
1379 1382 # heads immediately after a strip/rollback. That in turn
1380 1383 # guarantees that "cachetip == currenttip" (comparing both rev
1381 1384 # and node) always means no nodes have been added or destroyed.
1382 1385
1383 1386 # XXX this is suboptimal when qrefresh'ing: we strip the current
1384 1387 # head, refresh the tag cache, then immediately add a new head.
1385 1388 # But I think doing it this way is necessary for the "instant
1386 1389 # tag cache retrieval" case to work.
1387 1390 self.invalidatecaches()
1388 1391
1389 1392 def walk(self, match, node=None):
1390 1393 '''
1391 1394 walk recursively through the directory tree or a given
1392 1395 changeset, finding all files matched by the match
1393 1396 function
1394 1397 '''
1395 1398 return self[node].walk(match)
1396 1399
1397 1400 def status(self, node1='.', node2=None, match=None,
1398 1401 ignored=False, clean=False, unknown=False,
1399 1402 listsubrepos=False):
1400 1403 """return status of files between two nodes or node and working
1401 1404 directory.
1402 1405
1403 1406 If node1 is None, use the first dirstate parent instead.
1404 1407 If node2 is None, compare node1 with working directory.
1405 1408 """
1406 1409
1407 1410 def mfmatches(ctx):
1408 1411 mf = ctx.manifest().copy()
1409 1412 if match.always():
1410 1413 return mf
1411 1414 for fn in mf.keys():
1412 1415 if not match(fn):
1413 1416 del mf[fn]
1414 1417 return mf
1415 1418
1416 1419 if isinstance(node1, context.changectx):
1417 1420 ctx1 = node1
1418 1421 else:
1419 1422 ctx1 = self[node1]
1420 1423 if isinstance(node2, context.changectx):
1421 1424 ctx2 = node2
1422 1425 else:
1423 1426 ctx2 = self[node2]
1424 1427
1425 1428 working = ctx2.rev() is None
1426 1429 parentworking = working and ctx1 == self['.']
1427 1430 match = match or matchmod.always(self.root, self.getcwd())
1428 1431 listignored, listclean, listunknown = ignored, clean, unknown
1429 1432
1430 1433 # load earliest manifest first for caching reasons
1431 1434 if not working and ctx2.rev() < ctx1.rev():
1432 1435 ctx2.manifest()
1433 1436
1434 1437 if not parentworking:
1435 1438 def bad(f, msg):
1436 1439 # 'f' may be a directory pattern from 'match.files()',
1437 1440 # so 'f not in ctx1' is not enough
1438 1441 if f not in ctx1 and f not in ctx1.dirs():
1439 1442 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1440 1443 match.bad = bad
1441 1444
1442 1445 if working: # we need to scan the working dir
1443 1446 subrepos = []
1444 1447 if '.hgsub' in self.dirstate:
1445 1448 subrepos = ctx2.substate.keys()
1446 1449 s = self.dirstate.status(match, subrepos, listignored,
1447 1450 listclean, listunknown)
1448 1451 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1449 1452
1450 1453 # check for any possibly clean files
1451 1454 if parentworking and cmp:
1452 1455 fixup = []
1453 1456 # do a full compare of any files that might have changed
1454 1457 for f in sorted(cmp):
1455 1458 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1456 1459 or ctx1[f].cmp(ctx2[f])):
1457 1460 modified.append(f)
1458 1461 else:
1459 1462 fixup.append(f)
1460 1463
1461 1464 # update dirstate for files that are actually clean
1462 1465 if fixup:
1463 1466 if listclean:
1464 1467 clean += fixup
1465 1468
1466 1469 try:
1467 1470 # updating the dirstate is optional
1468 1471 # so we don't wait on the lock
1469 1472 wlock = self.wlock(False)
1470 1473 try:
1471 1474 for f in fixup:
1472 1475 self.dirstate.normal(f)
1473 1476 finally:
1474 1477 wlock.release()
1475 1478 except error.LockError:
1476 1479 pass
1477 1480
1478 1481 if not parentworking:
1479 1482 mf1 = mfmatches(ctx1)
1480 1483 if working:
1481 1484 # we are comparing working dir against non-parent
1482 1485 # generate a pseudo-manifest for the working dir
1483 1486 mf2 = mfmatches(self['.'])
1484 1487 for f in cmp + modified + added:
1485 1488 mf2[f] = None
1486 1489 mf2.set(f, ctx2.flags(f))
1487 1490 for f in removed:
1488 1491 if f in mf2:
1489 1492 del mf2[f]
1490 1493 else:
1491 1494 # we are comparing two revisions
1492 1495 deleted, unknown, ignored = [], [], []
1493 1496 mf2 = mfmatches(ctx2)
1494 1497
1495 1498 modified, added, clean = [], [], []
1496 1499 withflags = mf1.withflags() | mf2.withflags()
1497 1500 for fn in mf2:
1498 1501 if fn in mf1:
1499 1502 if (fn not in deleted and
1500 1503 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1501 1504 (mf1[fn] != mf2[fn] and
1502 1505 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1503 1506 modified.append(fn)
1504 1507 elif listclean:
1505 1508 clean.append(fn)
1506 1509 del mf1[fn]
1507 1510 elif fn not in deleted:
1508 1511 added.append(fn)
1509 1512 removed = mf1.keys()
1510 1513
1511 1514 if working and modified and not self.dirstate._checklink:
1512 1515 # Symlink placeholders may get non-symlink-like contents
1513 1516 # via user error or dereferencing by NFS or Samba servers,
1514 1517 # so we filter out any placeholders that don't look like a
1515 1518 # symlink
1516 1519 sane = []
1517 1520 for f in modified:
1518 1521 if ctx2.flags(f) == 'l':
1519 1522 d = ctx2[f].data()
1520 1523 if len(d) >= 1024 or '\n' in d or util.binary(d):
1521 1524 self.ui.debug('ignoring suspect symlink placeholder'
1522 1525 ' "%s"\n' % f)
1523 1526 continue
1524 1527 sane.append(f)
1525 1528 modified = sane
1526 1529
1527 1530 r = modified, added, removed, deleted, unknown, ignored, clean
1528 1531
1529 1532 if listsubrepos:
1530 1533 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1531 1534 if working:
1532 1535 rev2 = None
1533 1536 else:
1534 1537 rev2 = ctx2.substate[subpath][1]
1535 1538 try:
1536 1539 submatch = matchmod.narrowmatcher(subpath, match)
1537 1540 s = sub.status(rev2, match=submatch, ignored=listignored,
1538 1541 clean=listclean, unknown=listunknown,
1539 1542 listsubrepos=True)
1540 1543 for rfiles, sfiles in zip(r, s):
1541 1544 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1542 1545 except error.LookupError:
1543 1546 self.ui.status(_("skipping missing subrepository: %s\n")
1544 1547 % subpath)
1545 1548
1546 1549 for l in r:
1547 1550 l.sort()
1548 1551 return r
1549 1552
1550 1553 def heads(self, start=None):
1551 1554 heads = self.changelog.heads(start)
1552 1555 # sort the output in rev descending order
1553 1556 return sorted(heads, key=self.changelog.rev, reverse=True)
1554 1557
1555 1558 def branchheads(self, branch=None, start=None, closed=False):
1556 1559 '''return a (possibly filtered) list of heads for the given branch
1557 1560
1558 1561 Heads are returned in topological order, from newest to oldest.
1559 1562 If branch is None, use the dirstate branch.
1560 1563 If start is not None, return only heads reachable from start.
1561 1564 If closed is True, return heads that are marked as closed as well.
1562 1565 '''
1563 1566 if branch is None:
1564 1567 branch = self[None].branch()
1565 1568 branches = self.branchmap()
1566 1569 if branch not in branches:
1567 1570 return []
1568 1571 # the cache returns heads ordered lowest to highest
1569 1572 bheads = list(reversed(branches[branch]))
1570 1573 if start is not None:
1571 1574 # filter out the heads that cannot be reached from startrev
1572 1575 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1573 1576 bheads = [h for h in bheads if h in fbheads]
1574 1577 if not closed:
1575 1578 bheads = [h for h in bheads if not self[h].closesbranch()]
1576 1579 return bheads
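# Hedged usage sketch (not part of the module): "repo" is assumed to be an
# open localrepository. branchheads() returns heads newest first; the
# default closed=False filters out heads that close their branch.
def summarizeheads(repo, branch='default'):
    openheads = repo.branchheads(branch)               # closed heads excluded
    allheads = repo.branchheads(branch, closed=True)   # closed heads included
    return len(openheads), len(allheads)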
1577 1580
1578 1581 def branches(self, nodes):
1579 1582 if not nodes:
1580 1583 nodes = [self.changelog.tip()]
1581 1584 b = []
1582 1585 for n in nodes:
1583 1586 t = n
1584 1587 while True:
1585 1588 p = self.changelog.parents(n)
1586 1589 if p[1] != nullid or p[0] == nullid:
1587 1590 b.append((t, n, p[0], p[1]))
1588 1591 break
1589 1592 n = p[0]
1590 1593 return b
1591 1594
1592 1595 def between(self, pairs):
1593 1596 r = []
1594 1597
1595 1598 for top, bottom in pairs:
1596 1599 n, l, i = top, [], 0
1597 1600 f = 1
1598 1601
1599 1602 while n != bottom and n != nullid:
1600 1603 p = self.changelog.parents(n)[0]
1601 1604 if i == f:
1602 1605 l.append(n)
1603 1606 f = f * 2
1604 1607 n = p
1605 1608 i += 1
1606 1609
1607 1610 r.append(l)
1608 1611
1609 1612 return r
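# A minimal standalone sketch (not part of localrepo.py) of the sampling
# pattern used by between() above: walking first parents from "top" toward
# "bottom", it records the nodes at distances 1, 2, 4, 8, ..., keeping the
# sample logarithmic in the length of the chain.
def samplechain(chain):
    """chain: nodes from top to bottom; return the exponentially spaced sample."""
    l, f, i = [], 1, 0
    for n in chain:
        if i == f:
            l.append(n)
            f *= 2
        i += 1
    return l

# samplechain(range(20)) -> [1, 2, 4, 8, 16]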
1610 1613
1611 1614 def pull(self, remote, heads=None, force=False):
1612 1615 # don't open a transaction for nothing or you break future useful
1613 1616 # rollback calls
1614 1617 tr = None
1615 1618 trname = 'pull\n' + util.hidepassword(remote.url())
1616 1619 lock = self.lock()
1617 1620 try:
1618 1621 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1619 1622 force=force)
1620 1623 common, fetch, rheads = tmp
1621 1624 if not fetch:
1622 1625 self.ui.status(_("no changes found\n"))
1623 1626 added = []
1624 1627 result = 0
1625 1628 else:
1626 1629 tr = self.transaction(trname)
1627 1630 if heads is None and list(common) == [nullid]:
1628 1631 self.ui.status(_("requesting all changes\n"))
1629 1632 elif heads is None and remote.capable('changegroupsubset'):
1630 1633 # issue1320, avoid a race if remote changed after discovery
1631 1634 heads = rheads
1632 1635
1633 1636 if remote.capable('getbundle'):
1634 1637 cg = remote.getbundle('pull', common=common,
1635 1638 heads=heads or rheads)
1636 1639 elif heads is None:
1637 1640 cg = remote.changegroup(fetch, 'pull')
1638 1641 elif not remote.capable('changegroupsubset'):
1639 1642 raise util.Abort(_("partial pull cannot be done because "
1640 1643 "other repository doesn't support "
1641 1644 "changegroupsubset."))
1642 1645 else:
1643 1646 cg = remote.changegroupsubset(fetch, heads, 'pull')
1644 1647 clstart = len(self.changelog)
1645 1648 result = self.addchangegroup(cg, 'pull', remote.url())
1646 1649 clend = len(self.changelog)
1647 1650 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1648 1651
1649 1652 # compute target subset
1650 1653 if heads is None:
1651 1654 # We pulled everything possible
1652 1655 # sync on everything common
1653 1656 subset = common + added
1654 1657 else:
1655 1658 # We pulled a specific subset
1656 1659 # sync on this subset
1657 1660 subset = heads
1658 1661
1659 1662 # Get remote phases data from remote
1660 1663 remotephases = remote.listkeys('phases')
1661 1664 publishing = bool(remotephases.get('publishing', False))
1662 1665 if remotephases and not publishing:
1663 1666 # remote is new and non-publishing
1664 1667 pheads, _dr = phases.analyzeremotephases(self, subset,
1665 1668 remotephases)
1666 1669 phases.advanceboundary(self, phases.public, pheads)
1667 1670 phases.advanceboundary(self, phases.draft, subset)
1668 1671 else:
1669 1672 # Remote is old or publishing; all common changesets
1670 1673 # should be seen as public
1671 1674 phases.advanceboundary(self, phases.public, subset)
1672 1675
1673 1676 remoteobs = remote.listkeys('obsolete')
1674 1677 if 'dump' in remoteobs:
1675 1678 if tr is None:
1676 1679 tr = self.transaction(trname)
1677 1680 data = base85.b85decode(remoteobs['dump'])
1678 1681 self.obsstore.mergemarkers(tr, data)
1679 1682 if tr is not None:
1680 1683 tr.close()
1681 1684 finally:
1682 1685 if tr is not None:
1683 1686 tr.release()
1684 1687 lock.release()
1685 1688
1686 1689 return result
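# Hedged sketch (names are illustrative, not the module's API) of the phase
# rule applied at the end of pull() above: a publishing or phase-less remote
# makes the whole synchronized subset public, while a non-publishing remote
# only publishes its public heads and caps the rest of the subset at draft.
def pulledphasemoves(remotephases, subset, pheads):
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        return [('public', pheads), ('draft', subset)]
    return [('public', subset)]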
1687 1690
1688 1691 def checkpush(self, force, revs):
1689 1692 """Extensions can override this function if additional checks have
1690 1693 to be performed before pushing, or call it if they override the push
1691 1694 command.
1692 1695 """
1693 1696 pass
1694 1697
1695 1698 def push(self, remote, force=False, revs=None, newbranch=False):
1696 1699 '''Push outgoing changesets (limited by revs) from the current
1697 1700 repository to remote. Return an integer:
1698 1701 - None means nothing to push
1699 1702 - 0 means HTTP error
1700 1703 - 1 means we pushed and remote head count is unchanged *or*
1701 1704 we have outgoing changesets but refused to push
1702 1705 - other values as described by addchangegroup()
1703 1706 '''
1704 1707 # there are two ways to push to remote repo:
1705 1708 #
1706 1709 # addchangegroup assumes local user can lock remote
1707 1710 # repo (local filesystem, old ssh servers).
1708 1711 #
1709 1712 # unbundle assumes local user cannot lock remote repo (new ssh
1710 1713 # servers, http servers).
1711 1714
1712 1715 # get local lock as we might write phase data
1713 1716 locallock = self.lock()
1714 1717 try:
1715 1718 self.checkpush(force, revs)
1716 1719 lock = None
1717 1720 unbundle = remote.capable('unbundle')
1718 1721 if not unbundle:
1719 1722 lock = remote.lock()
1720 1723 try:
1721 1724 # discovery
1722 1725 fci = discovery.findcommonincoming
1723 1726 commoninc = fci(self, remote, force=force)
1724 1727 common, inc, remoteheads = commoninc
1725 1728 fco = discovery.findcommonoutgoing
1726 1729 outgoing = fco(self, remote, onlyheads=revs,
1727 1730 commoninc=commoninc, force=force)
1728 1731
1729 1732
1730 1733 if not outgoing.missing:
1731 1734 # nothing to push
1732 1735 scmutil.nochangesfound(self.ui, outgoing.excluded)
1733 1736 ret = None
1734 1737 else:
1735 1738 # something to push
1736 1739 if not force:
1737 1740 discovery.checkheads(self, remote, outgoing,
1738 1741 remoteheads, newbranch,
1739 1742 bool(inc))
1740 1743
1741 1744 # create a changegroup from local
1742 1745 if revs is None and not outgoing.excluded:
1743 1746 # push everything,
1744 1747 # use the fast path, no race possible on push
1745 1748 cg = self._changegroup(outgoing.missing, 'push')
1746 1749 else:
1747 1750 cg = self.getlocalbundle('push', outgoing)
1748 1751
1749 1752 # apply changegroup to remote
1750 1753 if unbundle:
1751 1754 # local repo finds heads on server, finds out what
1752 1755 # revs it must push. once revs transferred, if server
1753 1756 # finds it has different heads (someone else won the
1754 1757 # commit/push race), server aborts.
1755 1758 if force:
1756 1759 remoteheads = ['force']
1757 1760 # ssh: return remote's addchangegroup()
1758 1761 # http: return remote's addchangegroup() or 0 for error
1759 1762 ret = remote.unbundle(cg, remoteheads, 'push')
1760 1763 else:
1761 1764 # we return an integer indicating remote head count
1762 1765 # change
1763 1766 ret = remote.addchangegroup(cg, 'push', self.url())
1764 1767
1765 1768 if ret:
1766 1769 # push succeeded, synchronize the target of the push
1767 1770 cheads = outgoing.missingheads
1768 1771 elif revs is None:
1769 1772 # All-out push failed; synchronize all common
1770 1773 cheads = outgoing.commonheads
1771 1774 else:
1772 1775 # I want cheads = heads(::missingheads and ::commonheads)
1773 1776 # (missingheads is revs with secret changeset filtered out)
1774 1777 #
1775 1778 # This can be expressed as:
1776 1779 # cheads = ( (missingheads and ::commonheads)
1777 1780 # + (commonheads and ::missingheads)
1778 1781 # )
1779 1782 #
1780 1783 # while trying to push we already computed the following:
1781 1784 # common = (::commonheads)
1782 1785 # missing = ((commonheads::missingheads) - commonheads)
1783 1786 #
1784 1787 # We can pick:
1785 1788 # * missingheads part of common (::commonheads)
1786 1789 common = set(outgoing.common)
1787 1790 cheads = [node for node in revs if node in common]
1788 1791 # and
1789 1792 # * commonheads that are parents of roots of missing
1790 1793 revset = self.set('%ln and parents(roots(%ln))',
1791 1794 outgoing.commonheads,
1792 1795 outgoing.missing)
1793 1796 cheads.extend(c.node() for c in revset)
1794 1797 # even when we don't push, exchanging phase data is useful
1795 1798 remotephases = remote.listkeys('phases')
1796 1799 if not remotephases: # old server or public only repo
1797 1800 phases.advanceboundary(self, phases.public, cheads)
1798 1801 # don't push any phase data as there is nothing to push
1799 1802 else:
1800 1803 ana = phases.analyzeremotephases(self, cheads, remotephases)
1801 1804 pheads, droots = ana
1802 1805 ### Apply remote phase on local
1803 1806 if remotephases.get('publishing', False):
1804 1807 phases.advanceboundary(self, phases.public, cheads)
1805 1808 else: # publish = False
1806 1809 phases.advanceboundary(self, phases.public, pheads)
1807 1810 phases.advanceboundary(self, phases.draft, cheads)
1808 1811 ### Apply local phase on remote
1809 1812
1810 1813 # Get the list of all revs draft on remote but public here.
1811 1814 # XXX Beware that the revset breaks if droots is not strictly
1812 1815 # XXX made of roots; we may want to ensure it is, but that is costly
1813 1816 outdated = self.set('heads((%ln::%ln) and public())',
1814 1817 droots, cheads)
1815 1818 for newremotehead in outdated:
1816 1819 r = remote.pushkey('phases',
1817 1820 newremotehead.hex(),
1818 1821 str(phases.draft),
1819 1822 str(phases.public))
1820 1823 if not r:
1821 1824 self.ui.warn(_('updating %s to public failed!\n')
1822 1825 % newremotehead)
1823 1826 if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
1824 1827 data = self.listkeys('obsolete')['dump']
1825 1828 r = remote.pushkey('obsolete', 'dump', '', data)
1826 1829 if not r:
1827 1830 self.ui.warn(_('failed to push obsolete markers!\n'))
1828 1831 finally:
1829 1832 if lock is not None:
1830 1833 lock.release()
1831 1834 finally:
1832 1835 locallock.release()
1833 1836
1834 1837 self.ui.debug("checking for updated bookmarks\n")
1835 1838 rb = remote.listkeys('bookmarks')
1836 1839 for k in rb.keys():
1837 1840 if k in self._bookmarks:
1838 1841 nr, nl = rb[k], hex(self._bookmarks[k])
1839 1842 if nr in self:
1840 1843 cr = self[nr]
1841 1844 cl = self[nl]
1842 1845 if cl in cr.descendants():
1843 1846 r = remote.pushkey('bookmarks', k, nr, nl)
1844 1847 if r:
1845 1848 self.ui.status(_("updating bookmark %s\n") % k)
1846 1849 else:
1847 1850 self.ui.warn(_('updating bookmark %s'
1848 1851 ' failed!\n') % k)
1849 1852
1850 1853 return ret
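# Hedged helper sketch (not part of the module) spelling out the return-value
# contract documented on push() above; the >1 and <0 cases follow the
# addchangegroup() encoding of the remote head-count change.
def describepushresult(ret):
    if ret is None:
        return 'nothing to push'
    if ret == 0:
        return 'HTTP error'
    if ret == 1:
        return 'pushed; remote head count unchanged (or push was refused)'
    dh = ret - 1 if ret > 1 else ret + 1
    return 'pushed; remote head count changed by %+d' % dh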
1851 1854
1852 1855 def changegroupinfo(self, nodes, source):
1853 1856 if self.ui.verbose or source == 'bundle':
1854 1857 self.ui.status(_("%d changesets found\n") % len(nodes))
1855 1858 if self.ui.debugflag:
1856 1859 self.ui.debug("list of changesets:\n")
1857 1860 for node in nodes:
1858 1861 self.ui.debug("%s\n" % hex(node))
1859 1862
1860 1863 def changegroupsubset(self, bases, heads, source):
1861 1864 """Compute a changegroup consisting of all the nodes that are
1862 1865 descendants of any of the bases and ancestors of any of the heads.
1863 1866 Return a chunkbuffer object whose read() method will return
1864 1867 successive changegroup chunks.
1865 1868
1866 1869 It is fairly complex as determining which filenodes and which
1867 1870 manifest nodes need to be included for the changeset to be complete
1868 1871 is non-trivial.
1869 1872
1870 1873 Another wrinkle is doing the reverse, figuring out which changeset in
1871 1874 the changegroup a particular filenode or manifestnode belongs to.
1872 1875 """
1873 1876 cl = self.changelog
1874 1877 if not bases:
1875 1878 bases = [nullid]
1876 1879 csets, bases, heads = cl.nodesbetween(bases, heads)
1877 1880 # We assume that all ancestors of bases are known
1878 1881 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1879 1882 return self._changegroupsubset(common, csets, heads, source)
1880 1883
1881 1884 def getlocalbundle(self, source, outgoing):
1882 1885 """Like getbundle, but taking a discovery.outgoing as an argument.
1883 1886
1884 1887 This is only implemented for local repos and reuses potentially
1885 1888 precomputed sets in outgoing."""
1886 1889 if not outgoing.missing:
1887 1890 return None
1888 1891 return self._changegroupsubset(outgoing.common,
1889 1892 outgoing.missing,
1890 1893 outgoing.missingheads,
1891 1894 source)
1892 1895
1893 1896 def getbundle(self, source, heads=None, common=None):
1894 1897 """Like changegroupsubset, but returns the set difference between the
1895 1898 ancestors of heads and the ancestors of common.
1896 1899
1897 1900 If heads is None, use the local heads. If common is None, use [nullid].
1898 1901
1899 1902 The nodes in common might not all be known locally due to the way the
1900 1903 current discovery protocol works.
1901 1904 """
1902 1905 cl = self.changelog
1903 1906 if common:
1904 1907 nm = cl.nodemap
1905 1908 common = [n for n in common if n in nm]
1906 1909 else:
1907 1910 common = [nullid]
1908 1911 if not heads:
1909 1912 heads = cl.heads()
1910 1913 return self.getlocalbundle(source,
1911 1914 discovery.outgoing(cl, common, heads))
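# Hedged sketch of the argument normalization performed by getbundle() above:
# common nodes we do not know are dropped (discovery may report nodes we do
# not have), an empty common defaults to [nullid], and an empty heads
# defaults to all local heads.
from mercurial.node import nullid

def normalizebundleargs(cl, heads, common):
    if common:
        common = [n for n in common if n in cl.nodemap]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return heads, common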
1912 1915
1913 1916 def _changegroupsubset(self, commonrevs, csets, heads, source):
1914 1917
1915 1918 cl = self.changelog
1916 1919 mf = self.manifest
1917 1920 mfs = {} # needed manifests
1918 1921 fnodes = {} # needed file nodes
1919 1922 changedfiles = set()
1920 1923 fstate = ['', {}]
1921 1924 count = [0, 0]
1922 1925
1923 1926 # can we go through the fast path?
1924 1927 heads.sort()
1925 1928 if heads == sorted(self.heads()):
1926 1929 return self._changegroup(csets, source)
1927 1930
1928 1931 # slow path
1929 1932 self.hook('preoutgoing', throw=True, source=source)
1930 1933 self.changegroupinfo(csets, source)
1931 1934
1932 1935 # filter any nodes that claim to be part of the known set
1933 1936 def prune(revlog, missing):
1934 1937 rr, rl = revlog.rev, revlog.linkrev
1935 1938 return [n for n in missing
1936 1939 if rl(rr(n)) not in commonrevs]
1937 1940
1938 1941 progress = self.ui.progress
1939 1942 _bundling = _('bundling')
1940 1943 _changesets = _('changesets')
1941 1944 _manifests = _('manifests')
1942 1945 _files = _('files')
1943 1946
1944 1947 def lookup(revlog, x):
1945 1948 if revlog == cl:
1946 1949 c = cl.read(x)
1947 1950 changedfiles.update(c[3])
1948 1951 mfs.setdefault(c[0], x)
1949 1952 count[0] += 1
1950 1953 progress(_bundling, count[0],
1951 1954 unit=_changesets, total=count[1])
1952 1955 return x
1953 1956 elif revlog == mf:
1954 1957 clnode = mfs[x]
1955 1958 mdata = mf.readfast(x)
1956 1959 for f, n in mdata.iteritems():
1957 1960 if f in changedfiles:
1958 1961 fnodes[f].setdefault(n, clnode)
1959 1962 count[0] += 1
1960 1963 progress(_bundling, count[0],
1961 1964 unit=_manifests, total=count[1])
1962 1965 return clnode
1963 1966 else:
1964 1967 progress(_bundling, count[0], item=fstate[0],
1965 1968 unit=_files, total=count[1])
1966 1969 return fstate[1][x]
1967 1970
1968 1971 bundler = changegroup.bundle10(lookup)
1969 1972 reorder = self.ui.config('bundle', 'reorder', 'auto')
1970 1973 if reorder == 'auto':
1971 1974 reorder = None
1972 1975 else:
1973 1976 reorder = util.parsebool(reorder)
1974 1977
1975 1978 def gengroup():
1976 1979 # Create a changenode group generator that will call our functions
1977 1980 # back to lookup the owning changenode and collect information.
1978 1981 count[:] = [0, len(csets)]
1979 1982 for chunk in cl.group(csets, bundler, reorder=reorder):
1980 1983 yield chunk
1981 1984 progress(_bundling, None)
1982 1985
1983 1986 # Create a generator for the manifestnodes that calls our lookup
1984 1987 # and data collection functions back.
1985 1988 for f in changedfiles:
1986 1989 fnodes[f] = {}
1987 1990 count[:] = [0, len(mfs)]
1988 1991 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1989 1992 yield chunk
1990 1993 progress(_bundling, None)
1991 1994
1992 1995 mfs.clear()
1993 1996
1994 1997 # Go through all our files in order sorted by name.
1995 1998 count[:] = [0, len(changedfiles)]
1996 1999 for fname in sorted(changedfiles):
1997 2000 filerevlog = self.file(fname)
1998 2001 if not len(filerevlog):
1999 2002 raise util.Abort(_("empty or missing revlog for %s")
2000 2003 % fname)
2001 2004 fstate[0] = fname
2002 2005 fstate[1] = fnodes.pop(fname, {})
2003 2006
2004 2007 nodelist = prune(filerevlog, fstate[1])
2005 2008 if nodelist:
2006 2009 count[0] += 1
2007 2010 yield bundler.fileheader(fname)
2008 2011 for chunk in filerevlog.group(nodelist, bundler, reorder):
2009 2012 yield chunk
2010 2013
2011 2014 # Signal that no more groups are left.
2012 2015 yield bundler.close()
2013 2016 progress(_bundling, None)
2014 2017
2015 2018 if csets:
2016 2019 self.hook('outgoing', node=hex(csets[0]), source=source)
2017 2020
2018 2021 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2019 2022
2020 2023 def changegroup(self, basenodes, source):
2021 2024 # to avoid a race we use changegroupsubset() (issue1320)
2022 2025 return self.changegroupsubset(basenodes, self.heads(), source)
2023 2026
2024 2027 def _changegroup(self, nodes, source):
2025 2028 """Compute the changegroup of all nodes that we have that a recipient
2026 2029 doesn't. Return a chunkbuffer object whose read() method will return
2027 2030 successive changegroup chunks.
2028 2031
2029 2032 This is much easier than the previous function as we can assume that
2030 2033 the recipient has any changenode we aren't sending them.
2031 2034
2032 2035 nodes is the set of nodes to send"""
2033 2036
2034 2037 cl = self.changelog
2035 2038 mf = self.manifest
2036 2039 mfs = {}
2037 2040 changedfiles = set()
2038 2041 fstate = ['']
2039 2042 count = [0, 0]
2040 2043
2041 2044 self.hook('preoutgoing', throw=True, source=source)
2042 2045 self.changegroupinfo(nodes, source)
2043 2046
2044 2047 revset = set([cl.rev(n) for n in nodes])
2045 2048
2046 2049 def gennodelst(log):
2047 2050 ln, llr = log.node, log.linkrev
2048 2051 return [ln(r) for r in log if llr(r) in revset]
2049 2052
2050 2053 progress = self.ui.progress
2051 2054 _bundling = _('bundling')
2052 2055 _changesets = _('changesets')
2053 2056 _manifests = _('manifests')
2054 2057 _files = _('files')
2055 2058
2056 2059 def lookup(revlog, x):
2057 2060 if revlog == cl:
2058 2061 c = cl.read(x)
2059 2062 changedfiles.update(c[3])
2060 2063 mfs.setdefault(c[0], x)
2061 2064 count[0] += 1
2062 2065 progress(_bundling, count[0],
2063 2066 unit=_changesets, total=count[1])
2064 2067 return x
2065 2068 elif revlog == mf:
2066 2069 count[0] += 1
2067 2070 progress(_bundling, count[0],
2068 2071 unit=_manifests, total=count[1])
2069 2072 return cl.node(revlog.linkrev(revlog.rev(x)))
2070 2073 else:
2071 2074 progress(_bundling, count[0], item=fstate[0],
2072 2075 total=count[1], unit=_files)
2073 2076 return cl.node(revlog.linkrev(revlog.rev(x)))
2074 2077
2075 2078 bundler = changegroup.bundle10(lookup)
2076 2079 reorder = self.ui.config('bundle', 'reorder', 'auto')
2077 2080 if reorder == 'auto':
2078 2081 reorder = None
2079 2082 else:
2080 2083 reorder = util.parsebool(reorder)
2081 2084
2082 2085 def gengroup():
2083 2086 '''yield a sequence of changegroup chunks (strings)'''
2084 2087 # construct a list of all changed files
2085 2088
2086 2089 count[:] = [0, len(nodes)]
2087 2090 for chunk in cl.group(nodes, bundler, reorder=reorder):
2088 2091 yield chunk
2089 2092 progress(_bundling, None)
2090 2093
2091 2094 count[:] = [0, len(mfs)]
2092 2095 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2093 2096 yield chunk
2094 2097 progress(_bundling, None)
2095 2098
2096 2099 count[:] = [0, len(changedfiles)]
2097 2100 for fname in sorted(changedfiles):
2098 2101 filerevlog = self.file(fname)
2099 2102 if not len(filerevlog):
2100 2103 raise util.Abort(_("empty or missing revlog for %s")
2101 2104 % fname)
2102 2105 fstate[0] = fname
2103 2106 nodelist = gennodelst(filerevlog)
2104 2107 if nodelist:
2105 2108 count[0] += 1
2106 2109 yield bundler.fileheader(fname)
2107 2110 for chunk in filerevlog.group(nodelist, bundler, reorder):
2108 2111 yield chunk
2109 2112 yield bundler.close()
2110 2113 progress(_bundling, None)
2111 2114
2112 2115 if nodes:
2113 2116 self.hook('outgoing', node=hex(nodes[0]), source=source)
2114 2117
2115 2118 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2116 2119
2117 2120 def addchangegroup(self, source, srctype, url, emptyok=False):
2118 2121 """Add the changegroup returned by source.read() to this repo.
2119 2122 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2120 2123 the URL of the repo where this changegroup is coming from.
2121 2124
2122 2125 Return an integer summarizing the change to this repo:
2123 2126 - nothing changed or no source: 0
2124 2127 - more heads than before: 1+added heads (2..n)
2125 2128 - fewer heads than before: -1-removed heads (-2..-n)
2126 2129 - number of heads stays the same: 1
2127 2130 """
2128 2131 def csmap(x):
2129 2132 self.ui.debug("add changeset %s\n" % short(x))
2130 2133 return len(cl)
2131 2134
2132 2135 def revmap(x):
2133 2136 return cl.rev(x)
2134 2137
2135 2138 if not source:
2136 2139 return 0
2137 2140
2138 2141 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2139 2142
2140 2143 changesets = files = revisions = 0
2141 2144 efiles = set()
2142 2145
2143 2146 # write changelog data to temp files so concurrent readers will not see
2144 2147 # an inconsistent view
2145 2148 cl = self.changelog
2146 2149 cl.delayupdate()
2147 2150 oldheads = cl.heads()
2148 2151
2149 2152 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2150 2153 try:
2151 2154 trp = weakref.proxy(tr)
2152 2155 # pull off the changeset group
2153 2156 self.ui.status(_("adding changesets\n"))
2154 2157 clstart = len(cl)
2155 2158 class prog(object):
2156 2159 step = _('changesets')
2157 2160 count = 1
2158 2161 ui = self.ui
2159 2162 total = None
2160 2163 def __call__(self):
2161 2164 self.ui.progress(self.step, self.count, unit=_('chunks'),
2162 2165 total=self.total)
2163 2166 self.count += 1
2164 2167 pr = prog()
2165 2168 source.callback = pr
2166 2169
2167 2170 source.changelogheader()
2168 2171 srccontent = cl.addgroup(source, csmap, trp)
2169 2172 if not (srccontent or emptyok):
2170 2173 raise util.Abort(_("received changelog group is empty"))
2171 2174 clend = len(cl)
2172 2175 changesets = clend - clstart
2173 2176 for c in xrange(clstart, clend):
2174 2177 efiles.update(self[c].files())
2175 2178 efiles = len(efiles)
2176 2179 self.ui.progress(_('changesets'), None)
2177 2180
2178 2181 # pull off the manifest group
2179 2182 self.ui.status(_("adding manifests\n"))
2180 2183 pr.step = _('manifests')
2181 2184 pr.count = 1
2182 2185 pr.total = changesets # manifests <= changesets
2183 2186 # no need to check for empty manifest group here:
2184 2187 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2185 2188 # no new manifest will be created and the manifest group will
2186 2189 # be empty during the pull
2187 2190 source.manifestheader()
2188 2191 self.manifest.addgroup(source, revmap, trp)
2189 2192 self.ui.progress(_('manifests'), None)
2190 2193
2191 2194 needfiles = {}
2192 2195 if self.ui.configbool('server', 'validate', default=False):
2193 2196 # validate incoming csets have their manifests
2194 2197 for cset in xrange(clstart, clend):
2195 2198 mfest = self.changelog.read(self.changelog.node(cset))[0]
2196 2199 mfest = self.manifest.readdelta(mfest)
2197 2200 # store file nodes we must see
2198 2201 for f, n in mfest.iteritems():
2199 2202 needfiles.setdefault(f, set()).add(n)
2200 2203
2201 2204 # process the files
2202 2205 self.ui.status(_("adding file changes\n"))
2203 2206 pr.step = _('files')
2204 2207 pr.count = 1
2205 2208 pr.total = efiles
2206 2209 source.callback = None
2207 2210
2208 2211 while True:
2209 2212 chunkdata = source.filelogheader()
2210 2213 if not chunkdata:
2211 2214 break
2212 2215 f = chunkdata["filename"]
2213 2216 self.ui.debug("adding %s revisions\n" % f)
2214 2217 pr()
2215 2218 fl = self.file(f)
2216 2219 o = len(fl)
2217 2220 if not fl.addgroup(source, revmap, trp):
2218 2221 raise util.Abort(_("received file revlog group is empty"))
2219 2222 revisions += len(fl) - o
2220 2223 files += 1
2221 2224 if f in needfiles:
2222 2225 needs = needfiles[f]
2223 2226 for new in xrange(o, len(fl)):
2224 2227 n = fl.node(new)
2225 2228 if n in needs:
2226 2229 needs.remove(n)
2227 2230 if not needs:
2228 2231 del needfiles[f]
2229 2232 self.ui.progress(_('files'), None)
2230 2233
2231 2234 for f, needs in needfiles.iteritems():
2232 2235 fl = self.file(f)
2233 2236 for n in needs:
2234 2237 try:
2235 2238 fl.rev(n)
2236 2239 except error.LookupError:
2237 2240 raise util.Abort(
2238 2241 _('missing file data for %s:%s - run hg verify') %
2239 2242 (f, hex(n)))
2240 2243
2241 2244 dh = 0
2242 2245 if oldheads:
2243 2246 heads = cl.heads()
2244 2247 dh = len(heads) - len(oldheads)
2245 2248 for h in heads:
2246 2249 if h not in oldheads and self[h].closesbranch():
2247 2250 dh -= 1
2248 2251 htext = ""
2249 2252 if dh:
2250 2253 htext = _(" (%+d heads)") % dh
2251 2254
2252 2255 self.ui.status(_("added %d changesets"
2253 2256 " with %d changes to %d files%s\n")
2254 2257 % (changesets, revisions, files, htext))
2255 2258
2256 2259 if changesets > 0:
2257 2260 p = lambda: cl.writepending() and self.root or ""
2258 2261 self.hook('pretxnchangegroup', throw=True,
2259 2262 node=hex(cl.node(clstart)), source=srctype,
2260 2263 url=url, pending=p)
2261 2264
2262 2265 added = [cl.node(r) for r in xrange(clstart, clend)]
2263 2266 publishing = self.ui.configbool('phases', 'publish', True)
2264 2267 if srctype == 'push':
2265 2268 # Old servers can not push the boundary themselves.
2266 2269 # New servers won't push the boundary if the changeset already
2267 2270 # existed locally as secret
2268 2271 #
2269 2272 # We should not use added here but the list of all changes in
2270 2273 # the bundle
2271 2274 if publishing:
2272 2275 phases.advanceboundary(self, phases.public, srccontent)
2273 2276 else:
2274 2277 phases.advanceboundary(self, phases.draft, srccontent)
2275 2278 phases.retractboundary(self, phases.draft, added)
2276 2279 elif srctype != 'strip':
2277 2280 # publishing only alters behavior during push
2278 2281 #
2279 2282 # strip should not touch boundary at all
2280 2283 phases.retractboundary(self, phases.draft, added)
2281 2284
2282 2285 # make changelog see real files again
2283 2286 cl.finalize(trp)
2284 2287
2285 2288 tr.close()
2286 2289
2287 2290 if changesets > 0:
2288 2291 def runhooks():
2289 2292 # forcefully update the on-disk branch cache
2290 2293 self.ui.debug("updating the branch cache\n")
2291 2294 self.updatebranchcache()
2292 2295 self.hook("changegroup", node=hex(cl.node(clstart)),
2293 2296 source=srctype, url=url)
2294 2297
2295 2298 for n in added:
2296 2299 self.hook("incoming", node=hex(n), source=srctype,
2297 2300 url=url)
2298 2301 self._afterlock(runhooks)
2299 2302
2300 2303 finally:
2301 2304 tr.release()
2302 2305 # never return 0 here:
2303 2306 if dh < 0:
2304 2307 return dh - 1
2305 2308 else:
2306 2309 return dh + 1
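# Hedged sketch (not part of the module) inverting the return-value encoding
# documented on addchangegroup() above: 0 is reserved for "nothing changed",
# so the head-count delta is shifted one step away from zero on each side.
def headcountdelta(ret):
    """Recover the change in head count from a non-zero return value."""
    assert ret != 0
    return ret + 1 if ret < 0 else ret - 1

# headcountdelta(1) -> 0 (same number of heads)
# headcountdelta(3) -> +2, headcountdelta(-2) -> -1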
2307 2310
2308 2311 def stream_in(self, remote, requirements):
2309 2312 lock = self.lock()
2310 2313 try:
2311 2314 fp = remote.stream_out()
2312 2315 l = fp.readline()
2313 2316 try:
2314 2317 resp = int(l)
2315 2318 except ValueError:
2316 2319 raise error.ResponseError(
2317 2320 _('unexpected response from remote server:'), l)
2318 2321 if resp == 1:
2319 2322 raise util.Abort(_('operation forbidden by server'))
2320 2323 elif resp == 2:
2321 2324 raise util.Abort(_('locking the remote repository failed'))
2322 2325 elif resp != 0:
2323 2326 raise util.Abort(_('the server sent an unknown error code'))
2324 2327 self.ui.status(_('streaming all changes\n'))
2325 2328 l = fp.readline()
2326 2329 try:
2327 2330 total_files, total_bytes = map(int, l.split(' ', 1))
2328 2331 except (ValueError, TypeError):
2329 2332 raise error.ResponseError(
2330 2333 _('unexpected response from remote server:'), l)
2331 2334 self.ui.status(_('%d files to transfer, %s of data\n') %
2332 2335 (total_files, util.bytecount(total_bytes)))
2333 2336 handled_bytes = 0
2334 2337 self.ui.progress(_('clone'), 0, total=total_bytes)
2335 2338 start = time.time()
2336 2339 for i in xrange(total_files):
2337 2340 # XXX doesn't support '\n' or '\r' in filenames
2338 2341 l = fp.readline()
2339 2342 try:
2340 2343 name, size = l.split('\0', 1)
2341 2344 size = int(size)
2342 2345 except (ValueError, TypeError):
2343 2346 raise error.ResponseError(
2344 2347 _('unexpected response from remote server:'), l)
2345 2348 if self.ui.debugflag:
2346 2349 self.ui.debug('adding %s (%s)\n' %
2347 2350 (name, util.bytecount(size)))
2348 2351 # for backwards compat, name was partially encoded
2349 2352 ofp = self.sopener(store.decodedir(name), 'w')
2350 2353 for chunk in util.filechunkiter(fp, limit=size):
2351 2354 handled_bytes += len(chunk)
2352 2355 self.ui.progress(_('clone'), handled_bytes,
2353 2356 total=total_bytes)
2354 2357 ofp.write(chunk)
2355 2358 ofp.close()
2356 2359 elapsed = time.time() - start
2357 2360 if elapsed <= 0:
2358 2361 elapsed = 0.001
2359 2362 self.ui.progress(_('clone'), None)
2360 2363 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2361 2364 (util.bytecount(total_bytes), elapsed,
2362 2365 util.bytecount(total_bytes / elapsed)))
2363 2366
2364 2367 # new requirements = old non-format requirements +
2365 2368 # new format-related
2366 2369 # requirements from the streamed-in repository
2367 2370 requirements.update(set(self.requirements) - self.supportedformats)
2368 2371 self._applyrequirements(requirements)
2369 2372 self._writerequirements()
2370 2373
2371 2374 self.invalidate()
2372 2375 return len(self.heads()) + 1
2373 2376 finally:
2374 2377 lock.release()
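# Hedged sketch of the stream_out preamble parsed by stream_in() above (the
# helper name is made up): a status line (0 ok, 1 forbidden, 2 remote lock
# failed), then a "total_files total_bytes" line; per-file "name\0size"
# headers follow, each trailed by the raw file data.
def readstreampreamble(fp):
    resp = int(fp.readline())
    if resp != 0:
        raise ValueError('stream clone refused: response %d' % resp)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    return total_files, total_bytes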
2375 2378
2376 2379 def clone(self, remote, heads=[], stream=False):
2377 2380 '''clone remote repository.
2378 2381
2379 2382 keyword arguments:
2380 2383 heads: list of revs to clone (forces use of pull)
2381 2384 stream: use streaming clone if possible'''
2382 2385
2383 2386 # now, all clients that can request uncompressed clones can
2384 2387 # read repo formats supported by all servers that can serve
2385 2388 # them.
2386 2389
2387 2390 # if revlog format changes, client will have to check version
2388 2391 # and format flags on "stream" capability, and use
2389 2392 # uncompressed only if compatible.
2390 2393
2391 2394 if not stream:
2392 2395 # if the server explicitly prefers to stream (for fast LANs)
2393 2396 stream = remote.capable('stream-preferred')
2394 2397
2395 2398 if stream and not heads:
2396 2399 # 'stream' means remote revlog format is revlogv1 only
2397 2400 if remote.capable('stream'):
2398 2401 return self.stream_in(remote, set(('revlogv1',)))
2399 2402 # otherwise, 'streamreqs' contains the remote revlog format
2400 2403 streamreqs = remote.capable('streamreqs')
2401 2404 if streamreqs:
2402 2405 streamreqs = set(streamreqs.split(','))
2403 2406 # if we support it, stream in and adjust our requirements
2404 2407 if not streamreqs - self.supportedformats:
2405 2408 return self.stream_in(remote, streamreqs)
2406 2409 return self.pull(remote, heads)
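# Hedged sketch (the "caps" set and "streamreqs" set stand in for
# remote.capable() results) of the negotiation in clone() above: streaming is
# used only for full clones, and only when the remote's revlog format
# requirements are a subset of what we support; otherwise fall back to pull.
def clonemode(stream, heads, caps, streamreqs, supportedformats):
    if not stream:
        stream = 'stream-preferred' in caps
    if stream and not heads:
        if 'stream' in caps:
            return 'stream', set(['revlogv1'])
        if streamreqs and not (streamreqs - supportedformats):
            return 'stream', streamreqs
    return 'pull', None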
2407 2410
2408 2411 def pushkey(self, namespace, key, old, new):
2409 2412 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2410 2413 old=old, new=new)
2411 2414 ret = pushkey.push(self, namespace, key, old, new)
2412 2415 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2413 2416 ret=ret)
2414 2417 return ret
2415 2418
2416 2419 def listkeys(self, namespace):
2417 2420 self.hook('prelistkeys', throw=True, namespace=namespace)
2418 2421 values = pushkey.list(self, namespace)
2419 2422 self.hook('listkeys', namespace=namespace, values=values)
2420 2423 return values
2421 2424
2422 2425 def debugwireargs(self, one, two, three=None, four=None, five=None):
2423 2426 '''used to test argument passing over the wire'''
2424 2427 return "%s %s %s %s %s" % (one, two, three, four, five)
2425 2428
2426 2429 def savecommitmessage(self, text):
2427 2430 fp = self.opener('last-message.txt', 'wb')
2428 2431 try:
2429 2432 fp.write(text)
2430 2433 finally:
2431 2434 fp.close()
2432 2435 return self.pathto(fp.name[len(self.root)+1:])
2433 2436
2434 2437 # used to avoid circular references so destructors work
2435 2438 def aftertrans(files):
2436 2439 renamefiles = [tuple(t) for t in files]
2437 2440 def a():
2438 2441 for src, dest in renamefiles:
2439 2442 try:
2440 2443 util.rename(src, dest)
2441 2444 except OSError: # journal file does not yet exist
2442 2445 pass
2443 2446 return a
2444 2447
2445 2448 def undoname(fn):
2446 2449 base, name = os.path.split(fn)
2447 2450 assert name.startswith('journal')
2448 2451 return os.path.join(base, name.replace('journal', 'undo', 1))
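# Hedged illustration of undoname()'s rename rule (paths are made up); the
# journal prefix is swapped for undo exactly once:
assert undoname('/r/.hg/store/journal') == '/r/.hg/store/undo'
assert undoname('/r/.hg/store/journal.bookmarks') == '/r/.hg/store/undo.bookmarks'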
2449 2452
2450 2453 def instance(ui, path, create):
2451 2454 return localrepository(ui, util.urllocalpath(path), create)
2452 2455
2453 2456 def islocal(path):
2454 2457 return True
@@ -1,140 +1,142
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from i18n import _
11 11 import changelog, byterange, url, error
12 12 import localrepo, manifest, util, scmutil, store
13 13 import urllib, urllib2, errno
14 14
15 15 class httprangereader(object):
16 16 def __init__(self, url, opener):
17 17 # we assume opener has HTTPRangeHandler
18 18 self.url = url
19 19 self.pos = 0
20 20 self.opener = opener
21 21 self.name = url
22 22 def seek(self, pos):
23 23 self.pos = pos
24 24 def read(self, bytes=None):
25 25 req = urllib2.Request(self.url)
26 26 end = ''
27 27 if bytes:
28 28 end = self.pos + bytes - 1
29 29 if self.pos or end:
30 30 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
31 31
32 32 try:
33 33 f = self.opener.open(req)
34 34 data = f.read()
35 35 # Python 2.6+ defines a getcode() function, and 2.4 and
36 36 # 2.5 appear to always have an undocumented code attribute
37 37 # set. If we can't read either of those, fall back to 206
38 38 # and hope for the best.
39 39 code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))()
40 40 except urllib2.HTTPError, inst:
41 41 num = inst.code == 404 and errno.ENOENT or None
42 42 raise IOError(num, inst)
43 43 except urllib2.URLError, inst:
44 44 raise IOError(None, inst.reason[1])
45 45
46 46 if code == 200:
47 47 # HTTPRangeHandler does nothing if remote does not support
48 48 # Range headers and returns the full entity. Let's slice it.
49 49 if bytes:
50 50 data = data[self.pos:self.pos + bytes]
51 51 else:
52 52 data = data[self.pos:]
53 53 elif bytes:
54 54 data = data[:bytes]
55 55 self.pos += len(data)
56 56 return data
57 57 def __iter__(self):
58 58 return iter(self.read().splitlines(1))
59 59 def close(self):
60 60 pass
61 61
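# Hedged illustration (not part of statichttprepo.py) of the Range value
# built in httprangereader.read() above, mirroring its pos/end truthiness
# checks; None means no Range header is sent, i.e. fetch the whole entity.
def rangevalue(pos, nbytes=None):
    end = ''
    if nbytes:
        end = pos + nbytes - 1
    if pos or end:
        return 'bytes=%d-%s' % (pos, end)
    return None

# rangevalue(0, 10) -> 'bytes=0-9'
# rangevalue(100)   -> 'bytes=100-'
# rangevalue(0)     -> None (read everything)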
62 62 def build_opener(ui, authinfo):
63 63 # urllib cannot handle URLs with embedded user or passwd
64 64 urlopener = url.opener(ui, authinfo)
65 65 urlopener.add_handler(byterange.HTTPRangeHandler())
66 66
67 67 class statichttpopener(scmutil.abstractopener):
68 68 def __init__(self, base):
69 69 self.base = base
70 70
71 71 def __call__(self, path, mode="r", atomictemp=None):
72 72 if mode not in ('r', 'rb'):
73 73 raise IOError('Permission denied')
74 74 f = "/".join((self.base, urllib.quote(path)))
75 75 return httprangereader(f, urlopener)
76 76
77 77 return statichttpopener
78 78
79 79 class statichttprepository(localrepo.localrepository):
80 80 def __init__(self, ui, path):
81 81 self._url = path
82 82 self.ui = ui
83 83
84 84 self.root = path
85 85 u = util.url(path.rstrip('/') + "/.hg")
86 86 self.path, authinfo = u.authinfo()
87 87
88 88 opener = build_opener(ui, authinfo)
89 89 self.opener = opener(self.path)
90 self.vfs = self.opener
90 91 self._phasedefaults = []
91 92
92 93 try:
93 94 requirements = scmutil.readrequires(self.opener, self.supported)
94 95 except IOError, inst:
95 96 if inst.errno != errno.ENOENT:
96 97 raise
97 98 requirements = set()
98 99
99 100 # check if it is a non-empty old-style repository
100 101 try:
101 102 fp = self.opener("00changelog.i")
102 103 fp.read(1)
103 104 fp.close()
104 105 except IOError, inst:
105 106 if inst.errno != errno.ENOENT:
106 107 raise
107 108 # we do not care about empty old-style repositories here
108 109 msg = _("'%s' does not appear to be an hg repository") % path
109 110 raise error.RepoError(msg)
110 111
111 112 # setup store
112 113 self.store = store.store(requirements, self.path, opener)
113 114 self.spath = self.store.path
114 115 self.sopener = self.store.opener
116 self.svfs = self.sopener
115 117 self.sjoin = self.store.join
116 118 self._filecache = {}
117 119
118 120 self.manifest = manifest.manifest(self.sopener)
119 121 self.changelog = changelog.changelog(self.sopener)
120 122 self._tags = None
121 123 self.nodetagscache = None
122 124 self._branchcache = None
123 125 self._branchcachetip = None
124 126 self.encodepats = None
125 127 self.decodepats = None
126 128 self.capabilities.difference_update(["pushkey"])
127 129
128 130 def url(self):
129 131 return self._url
130 132
131 133 def local(self):
132 134 return False
133 135
134 136 def lock(self, wait=True):
135 137 raise util.Abort(_('cannot lock static-http repository'))
136 138
137 139 def instance(ui, path, create):
138 140 if create:
139 141 raise util.Abort(_('cannot create new static-http repository'))
140 142 return statichttprepository(ui, path[7:])