changegroup: unnest flookup
Matt Mackall
r13829:7b7c1d9d default

The per-file lookup callback flookup was previously redefined inside the file
loop of both _changegroupsubset and _changegroup, creating a fresh closure for
every file. This change hoists each flookup out of its loop: the loop now
publishes the current filename (and, in the subset case, the filenode map)
through a mutable fstate list that the single flookup reads, and the shared
count list is reset at the start of each bundling phase so the progress output
keeps working.
@@ -1,1934 +1,1934 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'parentdelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=0):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = util.path_auditor(self.root, self._checknested)
35 35 self.opener = util.opener(self.path)
36 36 self.wopener = util.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener("00changelog.i", "a").write(
61 61 '\0\0\0\2' # represents revlogv2
62 62 ' dummy changelog to prevent using the old repo layout'
63 63 )
64 64 if self.ui.configbool('format', 'parentdelta', False):
65 65 requirements.append("parentdelta")
66 66 else:
67 67 raise error.RepoError(_("repository %s not found") % path)
68 68 elif create:
69 69 raise error.RepoError(_("repository %s already exists") % path)
70 70 else:
71 71 # find requirements
72 72 requirements = set()
73 73 try:
74 74 requirements = set(self.opener("requires").read().splitlines())
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 for r in requirements - self.supported:
79 79 raise error.RequirementError(
80 80 _("requirement '%s' not supported") % r)
81 81
82 82 self.sharedpath = self.path
83 83 try:
84 84 s = os.path.realpath(self.opener("sharedpath").read())
85 85 if not os.path.exists(s):
86 86 raise error.RepoError(
87 87 _('.hg/sharedpath points to nonexistent directory %s') % s)
88 88 self.sharedpath = s
89 89 except IOError, inst:
90 90 if inst.errno != errno.ENOENT:
91 91 raise
92 92
93 93 self.store = store.store(requirements, self.sharedpath, util.opener)
94 94 self.spath = self.store.path
95 95 self.sopener = self.store.opener
96 96 self.sjoin = self.store.join
97 97 self.opener.createmode = self.store.createmode
98 98 self._applyrequirements(requirements)
99 99 if create:
100 100 self._writerequirements()
101 101
102 102 # These two define the set of tags for this repository. _tags
103 103 # maps tag name to node; _tagtypes maps tag name to 'global' or
104 104 # 'local'. (Global tags are defined by .hgtags across all
105 105 # heads, and local tags are defined in .hg/localtags.) They
106 106 # constitute the in-memory cache of tags.
107 107 self._tags = None
108 108 self._tagtypes = None
109 109
110 110 self._branchcache = None
111 111 self._branchcachetip = None
112 112 self.nodetagscache = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 def _applyrequirements(self, requirements):
118 118 self.requirements = requirements
119 119 self.sopener.options = {}
120 120 if 'parentdelta' in requirements:
121 121 self.sopener.options['parentdelta'] = 1
122 122
123 123 def _writerequirements(self):
124 124 reqfile = self.opener("requires", "w")
125 125 for r in self.requirements:
126 126 reqfile.write("%s\n" % r)
127 127 reqfile.close()
128 128
129 129 def _checknested(self, path):
130 130 """Determine if path is a legal nested repository."""
131 131 if not path.startswith(self.root):
132 132 return False
133 133 subpath = path[len(self.root) + 1:]
134 134
135 135 # XXX: Checking against the current working copy is wrong in
136 136 # the sense that it can reject things like
137 137 #
138 138 # $ hg cat -r 10 sub/x.txt
139 139 #
140 140 # if sub/ is no longer a subrepository in the working copy
141 141 # parent revision.
142 142 #
143 143 # However, it can of course also allow things that would have
144 144 # been rejected before, such as the above cat command if sub/
145 145 # is a subrepository now, but was a normal directory before.
146 146 # The old path auditor would have rejected by mistake since it
147 147 # panics when it sees sub/.hg/.
148 148 #
149 149 # All in all, checking against the working copy seems sensible
150 150 # since we want to prevent access to nested repositories on
151 151 # the filesystem *now*.
152 152 ctx = self[None]
153 153 parts = util.splitpath(subpath)
154 154 while parts:
155 155 prefix = os.sep.join(parts)
156 156 if prefix in ctx.substate:
157 157 if prefix == subpath:
158 158 return True
159 159 else:
160 160 sub = ctx.sub(prefix)
161 161 return sub.checknested(subpath[len(prefix) + 1:])
162 162 else:
163 163 parts.pop()
164 164 return False
165 165
166 166 @util.propertycache
167 167 def _bookmarks(self):
168 168 return bookmarks.read(self)
169 169
170 170 @util.propertycache
171 171 def _bookmarkcurrent(self):
172 172 return bookmarks.readcurrent(self)
173 173
174 174 @propertycache
175 175 def changelog(self):
176 176 c = changelog.changelog(self.sopener)
177 177 if 'HG_PENDING' in os.environ:
178 178 p = os.environ['HG_PENDING']
179 179 if p.startswith(self.root):
180 180 c.readpending('00changelog.i.a')
181 181 self.sopener.options['defversion'] = c.version
182 182 return c
183 183
184 184 @propertycache
185 185 def manifest(self):
186 186 return manifest.manifest(self.sopener)
187 187
188 188 @propertycache
189 189 def dirstate(self):
190 190 warned = [0]
191 191 def validate(node):
192 192 try:
193 193 r = self.changelog.rev(node)
194 194 return node
195 195 except error.LookupError:
196 196 if not warned[0]:
197 197 warned[0] = True
198 198 self.ui.warn(_("warning: ignoring unknown"
199 199 " working parent %s!\n") % short(node))
200 200 return nullid
201 201
202 202 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
203 203
204 204 def __getitem__(self, changeid):
205 205 if changeid is None:
206 206 return context.workingctx(self)
207 207 return context.changectx(self, changeid)
208 208
209 209 def __contains__(self, changeid):
210 210 try:
211 211 return bool(self.lookup(changeid))
212 212 except error.RepoLookupError:
213 213 return False
214 214
215 215 def __nonzero__(self):
216 216 return True
217 217
218 218 def __len__(self):
219 219 return len(self.changelog)
220 220
221 221 def __iter__(self):
222 222 for i in xrange(len(self)):
223 223 yield i
224 224
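The four methods above give localrepository the standard Python container
protocol: indexing returns a changectx, None selects the working directory,
and iteration yields revision numbers. A minimal usage sketch, assuming a
repository opened through the hg module (path hypothetical):

    from mercurial import hg, ui

    repo = hg.repository(ui.ui(), '/path/to/repo')  # hypothetical path
    print len(repo)          # number of changesets
    for rev in repo:         # iterate revisions 0..tip
        ctx = repo[rev]      # changectx for that revision
    wctx = repo[None]        # working directory context
    'no-such-rev' in repo    # False: the lookup failed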
225 225 def url(self):
226 226 return 'file:' + self.root
227 227
228 228 def hook(self, name, throw=False, **args):
229 229 return hook.hook(self.ui, self, name, throw, **args)
230 230
231 231 tag_disallowed = ':\r\n'
232 232
233 233 def _tag(self, names, node, message, local, user, date, extra={}):
234 234 if isinstance(names, str):
235 235 allchars = names
236 236 names = (names,)
237 237 else:
238 238 allchars = ''.join(names)
239 239 for c in self.tag_disallowed:
240 240 if c in allchars:
241 241 raise util.Abort(_('%r cannot be used in a tag name') % c)
242 242
243 243 branches = self.branchmap()
244 244 for name in names:
245 245 self.hook('pretag', throw=True, node=hex(node), tag=name,
246 246 local=local)
247 247 if name in branches:
248 248 self.ui.warn(_("warning: tag %s conflicts with existing"
249 249 " branch name\n") % name)
250 250
251 251 def writetags(fp, names, munge, prevtags):
252 252 fp.seek(0, 2)
253 253 if prevtags and prevtags[-1] != '\n':
254 254 fp.write('\n')
255 255 for name in names:
256 256 m = munge and munge(name) or name
257 257 if self._tagtypes and name in self._tagtypes:
258 258 old = self._tags.get(name, nullid)
259 259 fp.write('%s %s\n' % (hex(old), m))
260 260 fp.write('%s %s\n' % (hex(node), m))
261 261 fp.close()
262 262
263 263 prevtags = ''
264 264 if local:
265 265 try:
266 266 fp = self.opener('localtags', 'r+')
267 267 except IOError:
268 268 fp = self.opener('localtags', 'a')
269 269 else:
270 270 prevtags = fp.read()
271 271
272 272 # local tags are stored in the current charset
273 273 writetags(fp, names, None, prevtags)
274 274 for name in names:
275 275 self.hook('tag', node=hex(node), tag=name, local=local)
276 276 return
277 277
278 278 try:
279 279 fp = self.wfile('.hgtags', 'rb+')
280 280 except IOError:
281 281 fp = self.wfile('.hgtags', 'ab')
282 282 else:
283 283 prevtags = fp.read()
284 284
285 285 # committed tags are stored in UTF-8
286 286 writetags(fp, names, encoding.fromlocal, prevtags)
287 287
288 288 fp.close()
289 289
290 290 if '.hgtags' not in self.dirstate:
291 291 self[None].add(['.hgtags'])
292 292
293 293 m = matchmod.exact(self.root, '', ['.hgtags'])
294 294 tagnode = self.commit(message, user, date, extra=extra, match=m)
295 295
296 296 for name in names:
297 297 self.hook('tag', node=hex(node), tag=name, local=local)
298 298
299 299 return tagnode
300 300
301 301 def tag(self, names, node, message, local, user, date):
302 302 '''tag a revision with one or more symbolic names.
303 303
304 304 names is a list of strings or, when adding a single tag, names may be a
305 305 string.
306 306
307 307 if local is True, the tags are stored in a per-repository file.
308 308 otherwise, they are stored in the .hgtags file, and a new
309 309 changeset is committed with the change.
310 310
311 311 keyword arguments:
312 312
313 313 local: whether to store tags in non-version-controlled file
314 314 (default False)
315 315
316 316 message: commit message to use if committing
317 317
318 318 user: name of user to use if committing
319 319
320 320 date: date tuple to use if committing'''
321 321
322 322 if not local:
323 323 for x in self.status()[:5]:
324 324 if '.hgtags' in x:
325 325 raise util.Abort(_('working copy of .hgtags is changed '
326 326 '(please commit .hgtags manually)'))
327 327
328 328 self.tags() # instantiate the cache
329 329 self._tag(names, node, message, local, user, date)
330 330
331 331 def tags(self):
332 332 '''return a mapping of tag to node'''
333 333 if self._tags is None:
334 334 (self._tags, self._tagtypes) = self._findtags()
335 335
336 336 return self._tags
337 337
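A short sketch of the tagging API above (names and values illustrative;
tagtype and nodetags are defined further down in this file):

    node = repo.lookup('tip')
    # local=False: write .hgtags and commit the change
    repo.tag(['v1.0'], node, 'Added tag v1.0', False, 'alice', None)
    repo.tags()           # {'tip': ..., 'v1.0': node, ...} in local encoding
    repo.tagtype('v1.0')  # 'global' ('local' for localtags, None if absent)
    repo.nodetags(node)   # sorted tag names attached to node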
338 338 def _findtags(self):
339 339 '''Do the hard work of finding tags. Return a pair of dicts
340 340 (tags, tagtypes) where tags maps tag name to node, and tagtypes
341 341 maps tag name to a string like \'global\' or \'local\'.
342 342 Subclasses or extensions are free to add their own tags, but
343 343 should be aware that the returned dicts will be retained for the
344 344 duration of the localrepo object.'''
345 345
346 346 # XXX what tagtype should subclasses/extensions use? Currently
347 347 # mq and bookmarks add tags, but do not set the tagtype at all.
348 348 # Should each extension invent its own tag type? Should there
349 349 # be one tagtype for all such "virtual" tags? Or is the status
350 350 # quo fine?
351 351
352 352 alltags = {} # map tag name to (node, hist)
353 353 tagtypes = {}
354 354
355 355 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
356 356 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
357 357
358 358 # Build the return dicts. Have to re-encode tag names because
359 359 # the tags module always uses UTF-8 (in order not to lose info
360 360 # writing to the cache), but the rest of Mercurial wants them in
361 361 # local encoding.
362 362 tags = {}
363 363 for (name, (node, hist)) in alltags.iteritems():
364 364 if node != nullid:
365 365 tags[encoding.tolocal(name)] = node
366 366 tags['tip'] = self.changelog.tip()
367 367 tagtypes = dict([(encoding.tolocal(name), value)
368 368 for (name, value) in tagtypes.iteritems()])
369 369 return (tags, tagtypes)
370 370
371 371 def tagtype(self, tagname):
372 372 '''
373 373 return the type of the given tag. result can be:
374 374
375 375 'local' : a local tag
376 376 'global' : a global tag
377 377 None : tag does not exist
378 378 '''
379 379
380 380 self.tags()
381 381
382 382 return self._tagtypes.get(tagname)
383 383
384 384 def tagslist(self):
385 385 '''return a list of tags ordered by revision'''
386 386 l = []
387 387 for t, n in self.tags().iteritems():
388 388 try:
389 389 r = self.changelog.rev(n)
390 390 except:
391 391 r = -2 # sort to the beginning of the list if unknown
392 392 l.append((r, t, n))
393 393 return [(t, n) for r, t, n in sorted(l)]
394 394
395 395 def nodetags(self, node):
396 396 '''return the tags associated with a node'''
397 397 if not self.nodetagscache:
398 398 self.nodetagscache = {}
399 399 for t, n in self.tags().iteritems():
400 400 self.nodetagscache.setdefault(n, []).append(t)
401 401 for tags in self.nodetagscache.itervalues():
402 402 tags.sort()
403 403 return self.nodetagscache.get(node, [])
404 404
405 405 def nodebookmarks(self, node):
406 406 marks = []
407 407 for bookmark, n in self._bookmarks.iteritems():
408 408 if n == node:
409 409 marks.append(bookmark)
410 410 return sorted(marks)
411 411
412 412 def _branchtags(self, partial, lrev):
413 413 # TODO: rename this function?
414 414 tiprev = len(self) - 1
415 415 if lrev != tiprev:
416 416 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
417 417 self._updatebranchcache(partial, ctxgen)
418 418 self._writebranchcache(partial, self.changelog.tip(), tiprev)
419 419
420 420 return partial
421 421
422 422 def updatebranchcache(self):
423 423 tip = self.changelog.tip()
424 424 if self._branchcache is not None and self._branchcachetip == tip:
425 425 return self._branchcache
426 426
427 427 oldtip = self._branchcachetip
428 428 self._branchcachetip = tip
429 429 if oldtip is None or oldtip not in self.changelog.nodemap:
430 430 partial, last, lrev = self._readbranchcache()
431 431 else:
432 432 lrev = self.changelog.rev(oldtip)
433 433 partial = self._branchcache
434 434
435 435 self._branchtags(partial, lrev)
436 436 # this private cache holds all heads (not just tips)
437 437 self._branchcache = partial
438 438
439 439 def branchmap(self):
440 440 '''returns a dictionary {branch: [branchheads]}'''
441 441 self.updatebranchcache()
442 442 return self._branchcache
443 443
444 444 def branchtags(self):
445 445 '''return a dict where branch names map to the tipmost head of
446 446 the branch, open heads come before closed'''
447 447 bt = {}
448 448 for bn, heads in self.branchmap().iteritems():
449 449 tip = heads[-1]
450 450 for h in reversed(heads):
451 451 if 'close' not in self.changelog.read(h)[5]:
452 452 tip = h
453 453 break
454 454 bt[bn] = tip
455 455 return bt
456 456
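The two views differ in shape: branchmap keeps every head per branch,
branchtags keeps a single tipmost open head. Sketch (illustrative):

    bm = repo.branchmap()   # {'default': [node, node, ...], ...}
                            # heads ordered lowest to highest revision
    bt = repo.branchtags()  # {'default': tipmost_open_head_node, ...}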
457 457 def _readbranchcache(self):
458 458 partial = {}
459 459 try:
460 460 f = self.opener("cache/branchheads")
461 461 lines = f.read().split('\n')
462 462 f.close()
463 463 except (IOError, OSError):
464 464 return {}, nullid, nullrev
465 465
466 466 try:
467 467 last, lrev = lines.pop(0).split(" ", 1)
468 468 last, lrev = bin(last), int(lrev)
469 469 if lrev >= len(self) or self[lrev].node() != last:
470 470 # invalidate the cache
471 471 raise ValueError('invalidating branch cache (tip differs)')
472 472 for l in lines:
473 473 if not l:
474 474 continue
475 475 node, label = l.split(" ", 1)
476 476 label = encoding.tolocal(label.strip())
477 477 partial.setdefault(label, []).append(bin(node))
478 478 except KeyboardInterrupt:
479 479 raise
480 480 except Exception, inst:
481 481 if self.ui.debugflag:
482 482 self.ui.warn(str(inst), '\n')
483 483 partial, last, lrev = {}, nullid, nullrev
484 484 return partial, last, lrev
485 485
486 486 def _writebranchcache(self, branches, tip, tiprev):
487 487 try:
488 488 f = self.opener("cache/branchheads", "w", atomictemp=True)
489 489 f.write("%s %s\n" % (hex(tip), tiprev))
490 490 for label, nodes in branches.iteritems():
491 491 for node in nodes:
492 492 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
493 493 f.rename()
494 494 except (IOError, OSError):
495 495 pass
496 496
497 497 def _updatebranchcache(self, partial, ctxgen):
498 498 # collect new branch entries
499 499 newbranches = {}
500 500 for c in ctxgen:
501 501 newbranches.setdefault(c.branch(), []).append(c.node())
502 502 # if older branchheads are reachable from new ones, they aren't
503 503 # really branchheads. Note checking parents is insufficient:
504 504 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
505 505 for branch, newnodes in newbranches.iteritems():
506 506 bheads = partial.setdefault(branch, [])
507 507 bheads.extend(newnodes)
508 508 if len(bheads) <= 1:
509 509 continue
510 510 # starting from tip means fewer passes over reachable
511 511 while newnodes:
512 512 latest = newnodes.pop()
513 513 if latest not in bheads:
514 514 continue
515 515 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
516 516 reachable = self.changelog.reachable(latest, minbhrev)
517 517 reachable.remove(latest)
518 518 bheads = [b for b in bheads if b not in reachable]
519 519 partial[branch] = bheads
520 520
521 521 def lookup(self, key):
522 522 if isinstance(key, int):
523 523 return self.changelog.node(key)
524 524 elif key == '.':
525 525 return self.dirstate.parents()[0]
526 526 elif key == 'null':
527 527 return nullid
528 528 elif key == 'tip':
529 529 return self.changelog.tip()
530 530 n = self.changelog._match(key)
531 531 if n:
532 532 return n
533 533 if key in self._bookmarks:
534 534 return self._bookmarks[key]
535 535 if key in self.tags():
536 536 return self.tags()[key]
537 537 if key in self.branchtags():
538 538 return self.branchtags()[key]
539 539 n = self.changelog._partialmatch(key)
540 540 if n:
541 541 return n
542 542
543 543 # can't find key, check if it might have come from damaged dirstate
544 544 if key in self.dirstate.parents():
545 545 raise error.Abort(_("working directory has unknown parent '%s'!")
546 546 % short(key))
547 547 try:
548 548 if len(key) == 20:
549 549 key = hex(key)
550 550 except:
551 551 pass
552 552 raise error.RepoLookupError(_("unknown revision '%s'") % key)
553 553
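lookup resolves its key through several namespaces in order: integer
revision, the special names '.', 'null' and 'tip', an exact node, bookmarks,
tags, branch names, and finally a unique node-prefix match. Sketch:

    repo.lookup(0)          # binary node of revision 0
    repo.lookup('.')        # first parent of the working directory
    repo.lookup('default')  # resolved via branchtags()
    repo.lookup('7b7c1d9d') # unique hex prefix match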
554 554 def lookupbranch(self, key, remote=None):
555 555 repo = remote or self
556 556 if key in repo.branchmap():
557 557 return key
558 558
559 559 repo = (remote and remote.local()) and remote or self
560 560 return repo[key].branch()
561 561
562 562 def known(self, nodes):
563 563 nm = self.changelog.nodemap
564 564 return [(n in nm) for n in nodes]
565 565
566 566 def local(self):
567 567 return True
568 568
569 569 def join(self, f):
570 570 return os.path.join(self.path, f)
571 571
572 572 def wjoin(self, f):
573 573 return os.path.join(self.root, f)
574 574
575 575 def file(self, f):
576 576 if f[0] == '/':
577 577 f = f[1:]
578 578 return filelog.filelog(self.sopener, f)
579 579
580 580 def changectx(self, changeid):
581 581 return self[changeid]
582 582
583 583 def parents(self, changeid=None):
584 584 '''get list of changectxs for parents of changeid'''
585 585 return self[changeid].parents()
586 586
587 587 def filectx(self, path, changeid=None, fileid=None):
588 588 """changeid can be a changeset revision, node, or tag.
589 589 fileid can be a file revision or node."""
590 590 return context.filectx(self, path, changeid, fileid)
591 591
592 592 def getcwd(self):
593 593 return self.dirstate.getcwd()
594 594
595 595 def pathto(self, f, cwd=None):
596 596 return self.dirstate.pathto(f, cwd)
597 597
598 598 def wfile(self, f, mode='r'):
599 599 return self.wopener(f, mode)
600 600
601 601 def _link(self, f):
602 602 return os.path.islink(self.wjoin(f))
603 603
604 604 def _loadfilter(self, filter):
605 605 if filter not in self.filterpats:
606 606 l = []
607 607 for pat, cmd in self.ui.configitems(filter):
608 608 if cmd == '!':
609 609 continue
610 610 mf = matchmod.match(self.root, '', [pat])
611 611 fn = None
612 612 params = cmd
613 613 for name, filterfn in self._datafilters.iteritems():
614 614 if cmd.startswith(name):
615 615 fn = filterfn
616 616 params = cmd[len(name):].lstrip()
617 617 break
618 618 if not fn:
619 619 fn = lambda s, c, **kwargs: util.filter(s, c)
620 620 # Wrap old filters not supporting keyword arguments
621 621 if not inspect.getargspec(fn)[2]:
622 622 oldfn = fn
623 623 fn = lambda s, c, **kwargs: oldfn(s, c)
624 624 l.append((mf, fn, params))
625 625 self.filterpats[filter] = l
626 626 return self.filterpats[filter]
627 627
628 628 def _filter(self, filterpats, filename, data):
629 629 for mf, fn, cmd in filterpats:
630 630 if mf(filename):
631 631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
632 632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
633 633 break
634 634
635 635 return data
636 636
637 637 @propertycache
638 638 def _encodefilterpats(self):
639 639 return self._loadfilter('encode')
640 640
641 641 @propertycache
642 642 def _decodefilterpats(self):
643 643 return self._loadfilter('decode')
644 644
645 645 def adddatafilter(self, name, filter):
646 646 self._datafilters[name] = filter
647 647
648 648 def wread(self, filename):
649 649 if self._link(filename):
650 650 data = os.readlink(self.wjoin(filename))
651 651 else:
652 652 data = self.wopener(filename, 'r').read()
653 653 return self._filter(self._encodefilterpats, filename, data)
654 654
655 655 def wwrite(self, filename, data, flags):
656 656 data = self._filter(self._decodefilterpats, filename, data)
657 657 if 'l' in flags:
658 658 self.wopener.symlink(data, filename)
659 659 else:
660 660 self.wopener(filename, 'w').write(data)
661 661 if 'x' in flags:
662 662 util.set_flags(self.wjoin(filename), False, True)
663 663
664 664 def wwritedata(self, filename, data):
665 665 return self._filter(self._decodefilterpats, filename, data)
666 666
667 667 def transaction(self, desc):
668 668 tr = self._transref and self._transref() or None
669 669 if tr and tr.running():
670 670 return tr.nest()
671 671
672 672 # abort here if the journal already exists
673 673 if os.path.exists(self.sjoin("journal")):
674 674 raise error.RepoError(
675 675 _("abandoned transaction found - run hg recover"))
676 676
677 677 # save dirstate for rollback
678 678 try:
679 679 ds = self.opener("dirstate").read()
680 680 except IOError:
681 681 ds = ""
682 682 self.opener("journal.dirstate", "w").write(ds)
683 683 self.opener("journal.branch", "w").write(
684 684 encoding.fromlocal(self.dirstate.branch()))
685 685 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
686 686
687 687 renames = [(self.sjoin("journal"), self.sjoin("undo")),
688 688 (self.join("journal.dirstate"), self.join("undo.dirstate")),
689 689 (self.join("journal.branch"), self.join("undo.branch")),
690 690 (self.join("journal.desc"), self.join("undo.desc"))]
691 691 tr = transaction.transaction(self.ui.warn, self.sopener,
692 692 self.sjoin("journal"),
693 693 aftertrans(renames),
694 694 self.store.createmode)
695 695 self._transref = weakref.ref(tr)
696 696 return tr
697 697
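The usual calling pattern, mirroring commitctx below: take the store lock
first, close the transaction on success, and release it unconditionally
(sketch; the description string is arbitrary):

    lock = repo.lock()
    try:
        tr = repo.transaction('my-operation')
        try:
            # ... append to revlogs through tr ...
            tr.close()    # success: the journal becomes permanent
        finally:
            tr.release()  # rolls back if close() was never reached
    finally:
        lock.release()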
698 698 def recover(self):
699 699 lock = self.lock()
700 700 try:
701 701 if os.path.exists(self.sjoin("journal")):
702 702 self.ui.status(_("rolling back interrupted transaction\n"))
703 703 transaction.rollback(self.sopener, self.sjoin("journal"),
704 704 self.ui.warn)
705 705 self.invalidate()
706 706 return True
707 707 else:
708 708 self.ui.warn(_("no interrupted transaction available\n"))
709 709 return False
710 710 finally:
711 711 lock.release()
712 712
713 713 def rollback(self, dryrun=False):
714 714 wlock = lock = None
715 715 try:
716 716 wlock = self.wlock()
717 717 lock = self.lock()
718 718 if os.path.exists(self.sjoin("undo")):
719 719 try:
720 720 args = self.opener("undo.desc", "r").read().splitlines()
721 721 if len(args) >= 3 and self.ui.verbose:
722 722 desc = _("repository tip rolled back to revision %s"
723 723 " (undo %s: %s)\n") % (
724 724 int(args[0]) - 1, args[1], args[2])
725 725 elif len(args) >= 2:
726 726 desc = _("repository tip rolled back to revision %s"
727 727 " (undo %s)\n") % (
728 728 int(args[0]) - 1, args[1])
729 729 except IOError:
730 730 desc = _("rolling back unknown transaction\n")
731 731 self.ui.status(desc)
732 732 if dryrun:
733 733 return
734 734 transaction.rollback(self.sopener, self.sjoin("undo"),
735 735 self.ui.warn)
736 736 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
737 737 if os.path.exists(self.join('undo.bookmarks')):
738 738 util.rename(self.join('undo.bookmarks'),
739 739 self.join('bookmarks'))
740 740 try:
741 741 branch = self.opener("undo.branch").read()
742 742 self.dirstate.setbranch(branch)
743 743 except IOError:
744 744 self.ui.warn(_("Named branch could not be reset, "
745 745 "current branch still is: %s\n")
746 746 % self.dirstate.branch())
747 747 self.invalidate()
748 748 self.dirstate.invalidate()
749 749 self.destroyed()
750 750 parents = tuple([p.rev() for p in self.parents()])
751 751 if len(parents) > 1:
752 752 self.ui.status(_("working directory now based on "
753 753 "revisions %d and %d\n") % parents)
754 754 else:
755 755 self.ui.status(_("working directory now based on "
756 756 "revision %d\n") % parents)
757 757 else:
758 758 self.ui.warn(_("no rollback information available\n"))
759 759 return 1
760 760 finally:
761 761 release(lock, wlock)
762 762
763 763 def invalidatecaches(self):
764 764 self._tags = None
765 765 self._tagtypes = None
766 766 self.nodetagscache = None
767 767 self._branchcache = None # in UTF-8
768 768 self._branchcachetip = None
769 769
770 770 def invalidate(self):
771 771 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
772 772 if a in self.__dict__:
773 773 delattr(self, a)
774 774 self.invalidatecaches()
775 775
776 776 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
777 777 try:
778 778 l = lock.lock(lockname, 0, releasefn, desc=desc)
779 779 except error.LockHeld, inst:
780 780 if not wait:
781 781 raise
782 782 self.ui.warn(_("waiting for lock on %s held by %r\n") %
783 783 (desc, inst.locker))
784 784 # default to 600 seconds timeout
785 785 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
786 786 releasefn, desc=desc)
787 787 if acquirefn:
788 788 acquirefn()
789 789 return l
790 790
791 791 def lock(self, wait=True):
792 792 '''Lock the repository store (.hg/store) and return a weak reference
793 793 to the lock. Use this before modifying the store (e.g. committing or
 794 794 stripping). If you are opening a transaction, get a lock as well.'''
795 795 l = self._lockref and self._lockref()
796 796 if l is not None and l.held:
797 797 l.lock()
798 798 return l
799 799
800 800 l = self._lock(self.sjoin("lock"), wait, self.store.write,
801 801 self.invalidate, _('repository %s') % self.origroot)
802 802 self._lockref = weakref.ref(l)
803 803 return l
804 804
805 805 def wlock(self, wait=True):
806 806 '''Lock the non-store parts of the repository (everything under
807 807 .hg except .hg/store) and return a weak reference to the lock.
808 808 Use this before modifying files in .hg.'''
809 809 l = self._wlockref and self._wlockref()
810 810 if l is not None and l.held:
811 811 l.lock()
812 812 return l
813 813
814 814 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
815 815 self.dirstate.invalidate, _('working directory of %s') %
816 816 self.origroot)
817 817 self._wlockref = weakref.ref(l)
818 818 return l
819 819
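When both locks are needed, take wlock before lock, as commit() and
rollback() do in this file (sketch):

    wlock = repo.wlock()    # guards .hg outside the store
    try:
        lock = repo.lock()  # guards .hg/store
        try:
            pass            # ... modify store and working state ...
        finally:
            lock.release()
    finally:
        wlock.release()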
820 820 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
821 821 """
822 822 commit an individual file as part of a larger transaction
823 823 """
824 824
825 825 fname = fctx.path()
826 826 text = fctx.data()
827 827 flog = self.file(fname)
828 828 fparent1 = manifest1.get(fname, nullid)
829 829 fparent2 = fparent2o = manifest2.get(fname, nullid)
830 830
831 831 meta = {}
832 832 copy = fctx.renamed()
833 833 if copy and copy[0] != fname:
834 834 # Mark the new revision of this file as a copy of another
835 835 # file. This copy data will effectively act as a parent
836 836 # of this new revision. If this is a merge, the first
837 837 # parent will be the nullid (meaning "look up the copy data")
838 838 # and the second one will be the other parent. For example:
839 839 #
840 840 # 0 --- 1 --- 3 rev1 changes file foo
841 841 # \ / rev2 renames foo to bar and changes it
842 842 # \- 2 -/ rev3 should have bar with all changes and
843 843 # should record that bar descends from
844 844 # bar in rev2 and foo in rev1
845 845 #
846 846 # this allows this merge to succeed:
847 847 #
848 848 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
849 849 # \ / merging rev3 and rev4 should use bar@rev2
850 850 # \- 2 --- 4 as the merge base
851 851 #
852 852
853 853 cfname = copy[0]
854 854 crev = manifest1.get(cfname)
855 855 newfparent = fparent2
856 856
857 857 if manifest2: # branch merge
858 858 if fparent2 == nullid or crev is None: # copied on remote side
859 859 if cfname in manifest2:
860 860 crev = manifest2[cfname]
861 861 newfparent = fparent1
862 862
863 863 # find source in nearest ancestor if we've lost track
864 864 if not crev:
865 865 self.ui.debug(" %s: searching for copy revision for %s\n" %
866 866 (fname, cfname))
867 867 for ancestor in self[None].ancestors():
868 868 if cfname in ancestor:
869 869 crev = ancestor[cfname].filenode()
870 870 break
871 871
872 872 if crev:
873 873 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
874 874 meta["copy"] = cfname
875 875 meta["copyrev"] = hex(crev)
876 876 fparent1, fparent2 = nullid, newfparent
877 877 else:
878 878 self.ui.warn(_("warning: can't find ancestor for '%s' "
879 879 "copied from '%s'!\n") % (fname, cfname))
880 880
881 881 elif fparent2 != nullid:
882 882 # is one parent an ancestor of the other?
883 883 fparentancestor = flog.ancestor(fparent1, fparent2)
884 884 if fparentancestor == fparent1:
885 885 fparent1, fparent2 = fparent2, nullid
886 886 elif fparentancestor == fparent2:
887 887 fparent2 = nullid
888 888
889 889 # is the file changed?
890 890 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
891 891 changelist.append(fname)
892 892 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
893 893
894 894 # are just the flags changed during merge?
895 895 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
896 896 changelist.append(fname)
897 897
898 898 return fparent1
899 899
900 900 def commit(self, text="", user=None, date=None, match=None, force=False,
901 901 editor=False, extra={}):
902 902 """Add a new revision to current repository.
903 903
904 904 Revision information is gathered from the working directory,
905 905 match can be used to filter the committed files. If editor is
906 906 supplied, it is called to get a commit message.
907 907 """
908 908
909 909 def fail(f, msg):
910 910 raise util.Abort('%s: %s' % (f, msg))
911 911
912 912 if not match:
913 913 match = matchmod.always(self.root, '')
914 914
915 915 if not force:
916 916 vdirs = []
917 917 match.dir = vdirs.append
918 918 match.bad = fail
919 919
920 920 wlock = self.wlock()
921 921 try:
922 922 wctx = self[None]
923 923 merge = len(wctx.parents()) > 1
924 924
925 925 if (not force and merge and match and
926 926 (match.files() or match.anypats())):
927 927 raise util.Abort(_('cannot partially commit a merge '
928 928 '(do not specify files or patterns)'))
929 929
930 930 changes = self.status(match=match, clean=force)
931 931 if force:
932 932 changes[0].extend(changes[6]) # mq may commit unchanged files
933 933
934 934 # check subrepos
935 935 subs = []
936 936 removedsubs = set()
937 937 for p in wctx.parents():
938 938 removedsubs.update(s for s in p.substate if match(s))
939 939 for s in wctx.substate:
940 940 removedsubs.discard(s)
941 941 if match(s) and wctx.sub(s).dirty():
942 942 subs.append(s)
943 943 if (subs or removedsubs):
944 944 if (not match('.hgsub') and
945 945 '.hgsub' in (wctx.modified() + wctx.added())):
946 946 raise util.Abort(_("can't commit subrepos without .hgsub"))
947 947 if '.hgsubstate' not in changes[0]:
948 948 changes[0].insert(0, '.hgsubstate')
949 949
950 950 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
951 951 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
952 952 if changedsubs:
953 953 raise util.Abort(_("uncommitted changes in subrepo %s")
954 954 % changedsubs[0])
955 955
956 956 # make sure all explicit patterns are matched
957 957 if not force and match.files():
958 958 matched = set(changes[0] + changes[1] + changes[2])
959 959
960 960 for f in match.files():
961 961 if f == '.' or f in matched or f in wctx.substate:
962 962 continue
963 963 if f in changes[3]: # missing
964 964 fail(f, _('file not found!'))
965 965 if f in vdirs: # visited directory
966 966 d = f + '/'
967 967 for mf in matched:
968 968 if mf.startswith(d):
969 969 break
970 970 else:
971 971 fail(f, _("no match under directory!"))
972 972 elif f not in self.dirstate:
973 973 fail(f, _("file not tracked!"))
974 974
975 975 if (not force and not extra.get("close") and not merge
976 976 and not (changes[0] or changes[1] or changes[2])
977 977 and wctx.branch() == wctx.p1().branch()):
978 978 return None
979 979
980 980 ms = mergemod.mergestate(self)
981 981 for f in changes[0]:
982 982 if f in ms and ms[f] == 'u':
983 983 raise util.Abort(_("unresolved merge conflicts "
984 984 "(see hg help resolve)"))
985 985
986 986 cctx = context.workingctx(self, text, user, date, extra, changes)
987 987 if editor:
988 988 cctx._text = editor(self, cctx, subs)
989 989 edited = (text != cctx._text)
990 990
991 991 # commit subs
992 992 if subs or removedsubs:
993 993 state = wctx.substate.copy()
994 994 for s in sorted(subs):
995 995 sub = wctx.sub(s)
996 996 self.ui.status(_('committing subrepository %s\n') %
997 997 subrepo.subrelpath(sub))
998 998 sr = sub.commit(cctx._text, user, date)
999 999 state[s] = (state[s][0], sr)
1000 1000 subrepo.writestate(self, state)
1001 1001
1002 1002 # Save commit message in case this transaction gets rolled back
1003 1003 # (e.g. by a pretxncommit hook). Leave the content alone on
1004 1004 # the assumption that the user will use the same editor again.
1005 1005 msgfile = self.opener('last-message.txt', 'wb')
1006 1006 msgfile.write(cctx._text)
1007 1007 msgfile.close()
1008 1008
1009 1009 p1, p2 = self.dirstate.parents()
1010 1010 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1011 1011 try:
1012 1012 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1013 1013 ret = self.commitctx(cctx, True)
1014 1014 except:
1015 1015 if edited:
1016 1016 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1017 1017 self.ui.write(
1018 1018 _('note: commit message saved in %s\n') % msgfn)
1019 1019 raise
1020 1020
1021 1021 # update bookmarks, dirstate and mergestate
1022 1022 bookmarks.update(self, p1, ret)
1023 1023 for f in changes[0] + changes[1]:
1024 1024 self.dirstate.normal(f)
1025 1025 for f in changes[2]:
1026 1026 self.dirstate.forget(f)
1027 1027 self.dirstate.setparents(ret)
1028 1028 ms.reset()
1029 1029 finally:
1030 1030 wlock.release()
1031 1031
1032 1032 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1033 1033 return ret
1034 1034
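A minimal commit sketch; the exact matcher narrows the commit to the named
files, the same device _tag() uses for .hgtags (file name hypothetical):

    from mercurial import match as matchmod

    m = matchmod.exact(repo.root, '', ['some/file.txt'])  # hypothetical
    node = repo.commit('fix some file', user='alice', match=m)
    if node is None:
        repo.ui.status('nothing changed\n')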
1035 1035 def commitctx(self, ctx, error=False):
1036 1036 """Add a new revision to current repository.
1037 1037 Revision information is passed via the context argument.
1038 1038 """
1039 1039
1040 1040 tr = lock = None
1041 1041 removed = list(ctx.removed())
1042 1042 p1, p2 = ctx.p1(), ctx.p2()
1043 1043 m1 = p1.manifest().copy()
1044 1044 m2 = p2.manifest()
1045 1045 user = ctx.user()
1046 1046
1047 1047 lock = self.lock()
1048 1048 try:
1049 1049 tr = self.transaction("commit")
1050 1050 trp = weakref.proxy(tr)
1051 1051
1052 1052 # check in files
1053 1053 new = {}
1054 1054 changed = []
1055 1055 linkrev = len(self)
1056 1056 for f in sorted(ctx.modified() + ctx.added()):
1057 1057 self.ui.note(f + "\n")
1058 1058 try:
1059 1059 fctx = ctx[f]
1060 1060 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1061 1061 changed)
1062 1062 m1.set(f, fctx.flags())
1063 1063 except OSError, inst:
1064 1064 self.ui.warn(_("trouble committing %s!\n") % f)
1065 1065 raise
1066 1066 except IOError, inst:
1067 1067 errcode = getattr(inst, 'errno', errno.ENOENT)
1068 1068 if error or errcode and errcode != errno.ENOENT:
1069 1069 self.ui.warn(_("trouble committing %s!\n") % f)
1070 1070 raise
1071 1071 else:
1072 1072 removed.append(f)
1073 1073
1074 1074 # update manifest
1075 1075 m1.update(new)
1076 1076 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1077 1077 drop = [f for f in removed if f in m1]
1078 1078 for f in drop:
1079 1079 del m1[f]
1080 1080 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1081 1081 p2.manifestnode(), (new, drop))
1082 1082
1083 1083 # update changelog
1084 1084 self.changelog.delayupdate()
1085 1085 n = self.changelog.add(mn, changed + removed, ctx.description(),
1086 1086 trp, p1.node(), p2.node(),
1087 1087 user, ctx.date(), ctx.extra().copy())
1088 1088 p = lambda: self.changelog.writepending() and self.root or ""
1089 1089 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1090 1090 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1091 1091 parent2=xp2, pending=p)
1092 1092 self.changelog.finalize(trp)
1093 1093 tr.close()
1094 1094
1095 1095 if self._branchcache:
1096 1096 self.updatebranchcache()
1097 1097 return n
1098 1098 finally:
1099 1099 if tr:
1100 1100 tr.release()
1101 1101 lock.release()
1102 1102
1103 1103 def destroyed(self):
1104 1104 '''Inform the repository that nodes have been destroyed.
1105 1105 Intended for use by strip and rollback, so there's a common
1106 1106 place for anything that has to be done after destroying history.'''
1107 1107 # XXX it might be nice if we could take the list of destroyed
1108 1108 # nodes, but I don't see an easy way for rollback() to do that
1109 1109
1110 1110 # Ensure the persistent tag cache is updated. Doing it now
1111 1111 # means that the tag cache only has to worry about destroyed
1112 1112 # heads immediately after a strip/rollback. That in turn
1113 1113 # guarantees that "cachetip == currenttip" (comparing both rev
1114 1114 # and node) always means no nodes have been added or destroyed.
1115 1115
1116 1116 # XXX this is suboptimal when qrefresh'ing: we strip the current
1117 1117 # head, refresh the tag cache, then immediately add a new head.
1118 1118 # But I think doing it this way is necessary for the "instant
1119 1119 # tag cache retrieval" case to work.
1120 1120 self.invalidatecaches()
1121 1121
1122 1122 def walk(self, match, node=None):
1123 1123 '''
1124 1124 walk recursively through the directory tree or a given
1125 1125 changeset, finding all files matched by the match
1126 1126 function
1127 1127 '''
1128 1128 return self[node].walk(match)
1129 1129
1130 1130 def status(self, node1='.', node2=None, match=None,
1131 1131 ignored=False, clean=False, unknown=False,
1132 1132 listsubrepos=False):
1133 1133 """return status of files between two nodes or node and working directory
1134 1134
1135 1135 If node1 is None, use the first dirstate parent instead.
1136 1136 If node2 is None, compare node1 with working directory.
1137 1137 """
1138 1138
1139 1139 def mfmatches(ctx):
1140 1140 mf = ctx.manifest().copy()
1141 1141 for fn in mf.keys():
1142 1142 if not match(fn):
1143 1143 del mf[fn]
1144 1144 return mf
1145 1145
1146 1146 if isinstance(node1, context.changectx):
1147 1147 ctx1 = node1
1148 1148 else:
1149 1149 ctx1 = self[node1]
1150 1150 if isinstance(node2, context.changectx):
1151 1151 ctx2 = node2
1152 1152 else:
1153 1153 ctx2 = self[node2]
1154 1154
1155 1155 working = ctx2.rev() is None
1156 1156 parentworking = working and ctx1 == self['.']
1157 1157 match = match or matchmod.always(self.root, self.getcwd())
1158 1158 listignored, listclean, listunknown = ignored, clean, unknown
1159 1159
1160 1160 # load earliest manifest first for caching reasons
1161 1161 if not working and ctx2.rev() < ctx1.rev():
1162 1162 ctx2.manifest()
1163 1163
1164 1164 if not parentworking:
1165 1165 def bad(f, msg):
1166 1166 if f not in ctx1:
1167 1167 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1168 1168 match.bad = bad
1169 1169
1170 1170 if working: # we need to scan the working dir
1171 1171 subrepos = []
1172 1172 if '.hgsub' in self.dirstate:
1173 1173 subrepos = ctx1.substate.keys()
1174 1174 s = self.dirstate.status(match, subrepos, listignored,
1175 1175 listclean, listunknown)
1176 1176 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1177 1177
1178 1178 # check for any possibly clean files
1179 1179 if parentworking and cmp:
1180 1180 fixup = []
1181 1181 # do a full compare of any files that might have changed
1182 1182 for f in sorted(cmp):
1183 1183 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1184 1184 or ctx1[f].cmp(ctx2[f])):
1185 1185 modified.append(f)
1186 1186 else:
1187 1187 fixup.append(f)
1188 1188
1189 1189 # update dirstate for files that are actually clean
1190 1190 if fixup:
1191 1191 if listclean:
1192 1192 clean += fixup
1193 1193
1194 1194 try:
1195 1195 # updating the dirstate is optional
1196 1196 # so we don't wait on the lock
1197 1197 wlock = self.wlock(False)
1198 1198 try:
1199 1199 for f in fixup:
1200 1200 self.dirstate.normal(f)
1201 1201 finally:
1202 1202 wlock.release()
1203 1203 except error.LockError:
1204 1204 pass
1205 1205
1206 1206 if not parentworking:
1207 1207 mf1 = mfmatches(ctx1)
1208 1208 if working:
1209 1209 # we are comparing working dir against non-parent
1210 1210 # generate a pseudo-manifest for the working dir
1211 1211 mf2 = mfmatches(self['.'])
1212 1212 for f in cmp + modified + added:
1213 1213 mf2[f] = None
1214 1214 mf2.set(f, ctx2.flags(f))
1215 1215 for f in removed:
1216 1216 if f in mf2:
1217 1217 del mf2[f]
1218 1218 else:
1219 1219 # we are comparing two revisions
1220 1220 deleted, unknown, ignored = [], [], []
1221 1221 mf2 = mfmatches(ctx2)
1222 1222
1223 1223 modified, added, clean = [], [], []
1224 1224 for fn in mf2:
1225 1225 if fn in mf1:
1226 1226 if (mf1.flags(fn) != mf2.flags(fn) or
1227 1227 (mf1[fn] != mf2[fn] and
1228 1228 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1229 1229 modified.append(fn)
1230 1230 elif listclean:
1231 1231 clean.append(fn)
1232 1232 del mf1[fn]
1233 1233 else:
1234 1234 added.append(fn)
1235 1235 removed = mf1.keys()
1236 1236
1237 1237 r = modified, added, removed, deleted, unknown, ignored, clean
1238 1238
1239 1239 if listsubrepos:
1240 1240 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1241 1241 if working:
1242 1242 rev2 = None
1243 1243 else:
1244 1244 rev2 = ctx2.substate[subpath][1]
1245 1245 try:
1246 1246 submatch = matchmod.narrowmatcher(subpath, match)
1247 1247 s = sub.status(rev2, match=submatch, ignored=listignored,
1248 1248 clean=listclean, unknown=listunknown,
1249 1249 listsubrepos=True)
1250 1250 for rfiles, sfiles in zip(r, s):
1251 1251 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1252 1252 except error.LookupError:
1253 1253 self.ui.status(_("skipping missing subrepository: %s\n")
1254 1254 % subpath)
1255 1255
1256 1256 for l in r:
1257 1257 l.sort()
1258 1258 return r
1259 1259
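The return value is a fixed 7-tuple of sorted filename lists; the unknown,
ignored and clean lists stay empty unless requested (sketch):

    st = repo.status(unknown=True, ignored=True, clean=True)
    modified, added, removed, deleted, unknown, ignored, clean = st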
1260 1260 def heads(self, start=None):
1261 1261 heads = self.changelog.heads(start)
1262 1262 # sort the output in rev descending order
1263 1263 return sorted(heads, key=self.changelog.rev, reverse=True)
1264 1264
1265 1265 def branchheads(self, branch=None, start=None, closed=False):
1266 1266 '''return a (possibly filtered) list of heads for the given branch
1267 1267
1268 1268 Heads are returned in topological order, from newest to oldest.
1269 1269 If branch is None, use the dirstate branch.
1270 1270 If start is not None, return only heads reachable from start.
1271 1271 If closed is True, return heads that are marked as closed as well.
1272 1272 '''
1273 1273 if branch is None:
1274 1274 branch = self[None].branch()
1275 1275 branches = self.branchmap()
1276 1276 if branch not in branches:
1277 1277 return []
1278 1278 # the cache returns heads ordered lowest to highest
1279 1279 bheads = list(reversed(branches[branch]))
1280 1280 if start is not None:
1281 1281 # filter out the heads that cannot be reached from startrev
1282 1282 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1283 1283 bheads = [h for h in bheads if h in fbheads]
1284 1284 if not closed:
1285 1285 bheads = [h for h in bheads if
1286 1286 ('close' not in self.changelog.read(h)[5])]
1287 1287 return bheads
1288 1288
1289 1289 def branches(self, nodes):
1290 1290 if not nodes:
1291 1291 nodes = [self.changelog.tip()]
1292 1292 b = []
1293 1293 for n in nodes:
1294 1294 t = n
1295 1295 while 1:
1296 1296 p = self.changelog.parents(n)
1297 1297 if p[1] != nullid or p[0] == nullid:
1298 1298 b.append((t, n, p[0], p[1]))
1299 1299 break
1300 1300 n = p[0]
1301 1301 return b
1302 1302
1303 1303 def between(self, pairs):
1304 1304 r = []
1305 1305
1306 1306 for top, bottom in pairs:
1307 1307 n, l, i = top, [], 0
1308 1308 f = 1
1309 1309
1310 1310 while n != bottom and n != nullid:
1311 1311 p = self.changelog.parents(n)[0]
1312 1312 if i == f:
1313 1313 l.append(n)
1314 1314 f = f * 2
1315 1315 n = p
1316 1316 i += 1
1317 1317
1318 1318 r.append(l)
1319 1319
1320 1320 return r
1321 1321
1322 1322 def pull(self, remote, heads=None, force=False):
1323 1323 lock = self.lock()
1324 1324 try:
1325 1325 usecommon = remote.capable('getbundle')
1326 1326 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1327 1327 force=force, commononly=usecommon)
1328 1328 common, fetch, rheads = tmp
1329 1329 if not fetch:
1330 1330 self.ui.status(_("no changes found\n"))
1331 1331 result = 0
1332 1332 else:
1333 1333 if heads is None and list(common) == [nullid]:
1334 1334 self.ui.status(_("requesting all changes\n"))
1335 1335 elif heads is None and remote.capable('changegroupsubset'):
1336 1336 # issue1320, avoid a race if remote changed after discovery
1337 1337 heads = rheads
1338 1338
1339 1339 if usecommon:
1340 1340 cg = remote.getbundle('pull', common=common,
1341 1341 heads=heads or rheads)
1342 1342 elif heads is None:
1343 1343 cg = remote.changegroup(fetch, 'pull')
1344 1344 elif not remote.capable('changegroupsubset'):
1345 1345 raise util.Abort(_("partial pull cannot be done because "
1346 1346 "other repository doesn't support "
1347 1347 "changegroupsubset."))
1348 1348 else:
1349 1349 cg = remote.changegroupsubset(fetch, heads, 'pull')
1350 1350 result = self.addchangegroup(cg, 'pull', remote.url(),
1351 1351 lock=lock)
1352 1352 finally:
1353 1353 lock.release()
1354 1354
1355 1355 return result
1356 1356
1357 1357 def checkpush(self, force, revs):
1358 1358 """Extensions can override this function if additional checks have
1359 1359 to be performed before pushing, or call it if they override push
1360 1360 command.
1361 1361 """
1362 1362 pass
1363 1363
1364 1364 def push(self, remote, force=False, revs=None, newbranch=False):
1365 1365 '''Push outgoing changesets (limited by revs) from the current
1366 1366 repository to remote. Return an integer:
1367 1367 - 0 means HTTP error *or* nothing to push
1368 1368 - 1 means we pushed and remote head count is unchanged *or*
1369 1369 we have outgoing changesets but refused to push
1370 1370 - other values as described by addchangegroup()
1371 1371 '''
1372 1372 # there are two ways to push to remote repo:
1373 1373 #
1374 1374 # addchangegroup assumes local user can lock remote
1375 1375 # repo (local filesystem, old ssh servers).
1376 1376 #
1377 1377 # unbundle assumes local user cannot lock remote repo (new ssh
1378 1378 # servers, http servers).
1379 1379
1380 1380 self.checkpush(force, revs)
1381 1381 lock = None
1382 1382 unbundle = remote.capable('unbundle')
1383 1383 if not unbundle:
1384 1384 lock = remote.lock()
1385 1385 try:
1386 1386 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1387 1387 newbranch)
1388 1388 ret = remote_heads
1389 1389 if cg is not None:
1390 1390 if unbundle:
1391 1391 # local repo finds heads on server, finds out what
1392 1392 # revs it must push. once revs transferred, if server
1393 1393 # finds it has different heads (someone else won
1394 1394 # commit/push race), server aborts.
1395 1395 if force:
1396 1396 remote_heads = ['force']
1397 1397 # ssh: return remote's addchangegroup()
1398 1398 # http: return remote's addchangegroup() or 0 for error
1399 1399 ret = remote.unbundle(cg, remote_heads, 'push')
1400 1400 else:
1401 1401 # we return an integer indicating remote head count change
1402 1402 ret = remote.addchangegroup(cg, 'push', self.url(),
1403 1403 lock=lock)
1404 1404 finally:
1405 1405 if lock is not None:
1406 1406 lock.release()
1407 1407
1408 1408 self.ui.debug("checking for updated bookmarks\n")
1409 1409 rb = remote.listkeys('bookmarks')
1410 1410 for k in rb.keys():
1411 1411 if k in self._bookmarks:
1412 1412 nr, nl = rb[k], hex(self._bookmarks[k])
1413 1413 if nr in self:
1414 1414 cr = self[nr]
1415 1415 cl = self[nl]
1416 1416 if cl in cr.descendants():
1417 1417 r = remote.pushkey('bookmarks', k, nr, nl)
1418 1418 if r:
1419 1419 self.ui.status(_("updating bookmark %s\n") % k)
1420 1420 else:
1421 1421 self.ui.warn(_('updating bookmark %s'
1422 1422 ' failed!\n') % k)
1423 1423
1424 1424 return ret
1425 1425
1426 1426 def changegroupinfo(self, nodes, source):
1427 1427 if self.ui.verbose or source == 'bundle':
1428 1428 self.ui.status(_("%d changesets found\n") % len(nodes))
1429 1429 if self.ui.debugflag:
1430 1430 self.ui.debug("list of changesets:\n")
1431 1431 for node in nodes:
1432 1432 self.ui.debug("%s\n" % hex(node))
1433 1433
1434 1434 def changegroupsubset(self, bases, heads, source):
1435 1435 """Compute a changegroup consisting of all the nodes that are
 1436 1436 descendants of any of the bases and ancestors of any of the heads.
1437 1437 Return a chunkbuffer object whose read() method will return
1438 1438 successive changegroup chunks.
1439 1439
1440 1440 It is fairly complex as determining which filenodes and which
1441 1441 manifest nodes need to be included for the changeset to be complete
1442 1442 is non-trivial.
1443 1443
1444 1444 Another wrinkle is doing the reverse, figuring out which changeset in
1445 1445 the changegroup a particular filenode or manifestnode belongs to.
1446 1446 """
1447 1447 cl = self.changelog
1448 1448 if not bases:
1449 1449 bases = [nullid]
1450 1450 csets, bases, heads = cl.nodesbetween(bases, heads)
1451 1451 # We assume that all ancestors of bases are known
1452 1452 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1453 1453 return self._changegroupsubset(common, csets, heads, source)
1454 1454
1455 1455 def getbundle(self, source, heads=None, common=None):
1456 1456 """Like changegroupsubset, but returns the set difference between the
1457 1457 ancestors of heads and the ancestors common.
1458 1458
1459 1459 If heads is None, use the local heads. If common is None, use [nullid].
1460 1460
1461 1461 The nodes in common might not all be known locally due to the way the
1462 1462 current discovery protocol works.
1463 1463 """
1464 1464 cl = self.changelog
1465 1465 if common:
1466 1466 nm = cl.nodemap
1467 1467 common = [n for n in common if n in nm]
1468 1468 else:
1469 1469 common = [nullid]
1470 1470 if not heads:
1471 1471 heads = cl.heads()
1472 1472 common, missing = cl.findcommonmissing(common, heads)
1473 1473 return self._changegroupsubset(common, missing, heads, source)
1474 1474
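With the default arguments getbundle bundles ancestors(local heads) minus
ancestors(nullid), i.e. every changeset in the repository (sketch; the tag
name is hypothetical):

    cg = repo.getbundle('pull')  # everything
    cg = repo.getbundle('pull', heads=repo.heads(),
                        common=[repo.lookup('v1.0')])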
1475 1475 def _changegroupsubset(self, commonrevs, csets, heads, source):
1476 1476
1477 1477 cl = self.changelog
1478 1478 mf = self.manifest
1479 1479 mfs = {} # needed manifests
1480 1480 fnodes = {} # needed file nodes
1481 1481 changedfiles = set()
1482 fstate = ['', {}]
1482 1483 count = [0]
1483 1484
1484 1485 # can we go through the fast path ?
1485 1486 heads.sort()
1486 1487 if heads == sorted(self.heads()):
1487 1488 return self._changegroup(csets, source)
1488 1489
1489 1490 # slow path
1490 1491 self.hook('preoutgoing', throw=True, source=source)
1491 1492 self.changegroupinfo(csets, source)
1492 1493
1493 1494 # filter any nodes that claim to be part of the known set
1494 1495 def prune(revlog, missing):
1495 1496 for n in missing:
1496 1497 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1497 1498 yield n
1498 1499
1499 1500 def clookup(revlog, x):
1500 1501 c = cl.read(x)
1501 1502 changedfiles.update(c[3])
1502 1503 mfs.setdefault(c[0], x)
1503 1504 count[0] += 1
1504 1505 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1505 1506 return x
1506 1507
1507 1508 def mlookup(revlog, x):
1508 1509 clnode = mfs[x]
1509 1510 mdata = mf.readfast(x)
1510 1511 for f in changedfiles:
1511 1512 if f in mdata:
1512 1513 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1513 1514 count[0] += 1
1514 1515 self.ui.progress(_('bundling'), count[0],
1515 1516 unit=_('manifests'), total=len(mfs))
1516 1517 return mfs[x]
1517 1518
1519 def flookup(revlog, x):
1520 self.ui.progress(
1521 _('bundling'), count[0], item=fstate[0],
1522 unit=_('files'), total=len(changedfiles))
1523 return fstate[1][x]
1524
1518 1525 # Now that we have all theses utility functions to help out and
1519 1526 # logically divide up the task, generate the group.
1520 1527 def gengroup():
1521 1528 # Create a changenode group generator that will call our functions
1522 1529 # back to lookup the owning changenode and collect information.
1523 1530 for chunk in cl.group(csets, clookup):
1524 1531 yield chunk
1525 efiles = len(changedfiles)
1526 1532 self.ui.progress(_('bundling'), None)
1527 1533
1528 1534 # Create a generator for the manifestnodes that calls our lookup
1529 1535 # and data collection functions back.
1530 1536 count[0] = 0
1531 1537 for chunk in mf.group(prune(mf, mfs), mlookup):
1532 1538 yield chunk
1533 1539 self.ui.progress(_('bundling'), None)
1534 1540
1535 1541 mfs.clear()
1536 1542
1537 1543 # Go through all our files in order sorted by name.
1538 for idx, fname in enumerate(sorted(changedfiles)):
1544 count[0] = 0
1545 for fname in sorted(changedfiles):
1539 1546 filerevlog = self.file(fname)
1540 1547 if not len(filerevlog):
1541 1548 raise util.Abort(_("empty or missing revlog for %s") % fname)
1542 # Toss out the filenodes that the recipient isn't really
1543 # missing.
1544 missingfnodes = fnodes.pop(fname, {})
1549 fstate[0] = fname
1550 fstate[1] = fnodes.pop(fname, {})
1545 1551 first = True
1546 1552
1547 def flookup(revlog, x):
1548 # even though we print the same progress on
1549 # most loop iterations, put the progress call
1550 # here so that time estimates (if any) can be updated
1551 self.ui.progress(
1552 _('bundling'), idx, item=fname,
1553 unit=_('files'), total=efiles)
1554 return missingfnodes[x]
1555
1556 for chunk in filerevlog.group(prune(filerevlog, missingfnodes),
1553 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1557 1554 flookup):
1558 1555 if first:
1559 1556 if chunk == changegroup.closechunk():
1560 1557 break
1558 count[0] += 1
1561 1559 yield changegroup.chunkheader(len(fname))
1562 1560 yield fname
1563 1561 first = False
1564 1562 yield chunk
1565 1563 # Signal that no more groups are left.
1566 1564 yield changegroup.closechunk()
1567 1565 self.ui.progress(_('bundling'), None)
1568 1566
1569 1567 if csets:
1570 1568 self.hook('outgoing', node=hex(csets[0]), source=source)
1571 1569
1572 1570 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1573 1571
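The edits above are the point of this commit: flookup used to be redefined
inside the file loop, one closure per file; now a single flookup reads the
loop's current state from the mutable fstate list. The pattern, reduced to
its essentials (names taken from the code above, context stripped):

    fstate = ['', {}]              # [current filename, its filenode map]

    def flookup(revlog, x):
        # reads the shared state instead of closing over loop variables
        return fstate[1][x]

    for fname in sorted(changedfiles):
        fstate[0] = fname
        fstate[1] = fnodes.pop(fname, {})
        # filerevlog.group(..., flookup) now sees the updated state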
1574 1572 def changegroup(self, basenodes, source):
1575 1573 # to avoid a race we use changegroupsubset() (issue1320)
1576 1574 return self.changegroupsubset(basenodes, self.heads(), source)
1577 1575
1578 1576 def _changegroup(self, nodes, source):
1579 1577 """Compute the changegroup of all nodes that we have that a recipient
1580 1578 doesn't. Return a chunkbuffer object whose read() method will return
1581 1579 successive changegroup chunks.
1582 1580
1583 1581 This is much easier than the previous function, as we can assume
1584 1582 that the recipient already has any changenode we aren't sending.
1585 1583
1586 1584 nodes is the set of nodes to send"""
1587 1585
1588 1586 cl = self.changelog
1589 1587 mf = self.manifest
1590 1588 mfs = {}
1591 1589 changedfiles = set()
1590 fstate = ['']
1591 count = [0]
1592 1592
1593 1593 self.hook('preoutgoing', throw=True, source=source)
1594 1594 self.changegroupinfo(nodes, source)
1595 1595
1596 1596 revset = set([cl.rev(n) for n in nodes])
1597 1597
1598 1598 def gennodelst(log):
1599 1599 for r in log:
1600 1600 if log.linkrev(r) in revset:
1601 1601 yield log.node(r)
1602 1602
1603 def gengroup():
1604 '''yield a sequence of changegroup chunks (strings)'''
1605 # construct a list of all changed files
1606
1607 count = [0]
1608 1603 def clookup(revlog, x):
1609 1604 c = cl.read(x)
1610 1605 changedfiles.update(c[3])
1611 1606 mfs.setdefault(c[0], x)
1612 1607 count[0] += 1
1613 1608 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1614 1609 return x
1615 1610
1616 for chunk in cl.group(nodes, clookup):
1617 yield chunk
1618 efiles = len(changedfiles)
1619 changecount = count[0]
1620 self.ui.progress(_('bundling'), None)
1621
1622 count = [0]
1623 1611 def mlookup(revlog, x):
1624 1612 count[0] += 1
1625 1613 self.ui.progress(_('bundling'), count[0],
1626 unit=_('manifests'), total=changecount)
1614 unit=_('manifests'), total=len(mfs))
1615 return cl.node(revlog.linkrev(revlog.rev(x)))
1616
1617 def flookup(revlog, x):
1618 self.ui.progress(
1619 _('bundling'), count[0], item=fstate[0],
1620 total=len(changedfiles), unit=_('files'))
1627 1621 return cl.node(revlog.linkrev(revlog.rev(x)))
1628 1622
1623 def gengroup():
1624 '''yield a sequence of changegroup chunks (strings)'''
1625 # construct a list of all changed files
1626
1627 for chunk in cl.group(nodes, clookup):
1628 yield chunk
1629 self.ui.progress(_('bundling'), None)
1630
1631 count[0] = 0
1629 1632 for chunk in mf.group(gennodelst(mf), mlookup):
1630 1633 yield chunk
1631 1634 self.ui.progress(_('bundling'), None)
1632 1635
1633 for idx, fname in enumerate(sorted(changedfiles)):
1636 count[0] = 0
1637 for fname in sorted(changedfiles):
1634 1638 filerevlog = self.file(fname)
1635 1639 if not len(filerevlog):
1636 1640 raise util.Abort(_("empty or missing revlog for %s") % fname)
1641 fstate[0] = fname
1637 1642 first = True
1638 def flookup(revlog, x):
1639 self.ui.progress(
1640 _('bundling'), idx, item=fname,
1641 total=efiles, unit=_('files'))
1642 return cl.node(revlog.linkrev(revlog.rev(x)))
1643
1644 1643 for chunk in filerevlog.group(gennodelst(filerevlog), flookup):
1645 1644 if first:
1646 1645 if chunk == changegroup.closechunk():
1647 1646 break
1647 count[0] += 1
1648 1648 yield changegroup.chunkheader(len(fname))
1649 1649 yield fname
1650 1650 first = False
1651 1651 yield chunk
1652 1652 yield changegroup.closechunk()
1653 1653 self.ui.progress(_('bundling'), None)
1654 1654
1655 1655 if nodes:
1656 1656 self.hook('outgoing', node=hex(nodes[0]), source=source)
1657 1657
1658 1658 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1659 1659
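Both mlookup and flookup above lean on the same revlog invariant: every manifest or filelog revision records a linkrev, the changelog revision that introduced it. A sketch of the mapping both lookups perform; owningcset is an illustrative name, not part of this module:

    def owningcset(repo, revlog, node):
        # node -> local revision number in this revlog,
        # -> changelog revision that introduced it (the linkrev),
        # -> binary changelog node id, as the bundle format requires
        return repo.changelog.node(revlog.linkrev(revlog.rev(node)))
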
1660 1660 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1661 1661 """Add the changegroup returned by source.read() to this repo.
1662 1662 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1663 1663 the URL of the repo where this changegroup is coming from.
1664 1664 If lock is not None, the function takes ownership of the lock
1665 1665 and releases it after the changegroup is added.
1666 1666
1667 1667 Return an integer summarizing the change to this repo:
1668 1668 - nothing changed or no source: 0
1669 1669 - more heads than before: 1+added heads (2..n)
1670 1670 - fewer heads than before: -1-removed heads (-2..-n)
1671 1671 - number of heads stays the same: 1
1672 1672 """
1673 1673 def csmap(x):
1674 1674 self.ui.debug("add changeset %s\n" % short(x))
1675 1675 return len(cl)
1676 1676
1677 1677 def revmap(x):
1678 1678 return cl.rev(x)
1679 1679
1680 1680 if not source:
1681 1681 return 0
1682 1682
1683 1683 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1684 1684
1685 1685 changesets = files = revisions = 0
1686 1686 efiles = set()
1687 1687
1688 1688 # write changelog data to temp files so concurrent readers will not see
1689 1689 # inconsistent view
1690 1690 cl = self.changelog
1691 1691 cl.delayupdate()
1692 1692 oldheads = len(cl.heads())
1693 1693
1694 1694 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1695 1695 try:
1696 1696 trp = weakref.proxy(tr)
1697 1697 # pull off the changeset group
1698 1698 self.ui.status(_("adding changesets\n"))
1699 1699 clstart = len(cl)
1700 1700 class prog(object):
1701 1701 step = _('changesets')
1702 1702 count = 1
1703 1703 ui = self.ui
1704 1704 total = None
1705 1705 def __call__(self):
1706 1706 self.ui.progress(self.step, self.count, unit=_('chunks'),
1707 1707 total=self.total)
1708 1708 self.count += 1
1709 1709 pr = prog()
1710 1710 source.callback = pr
1711 1711
1712 1712 if (cl.addgroup(source, csmap, trp) is None
1713 1713 and not emptyok):
1714 1714 raise util.Abort(_("received changelog group is empty"))
1715 1715 clend = len(cl)
1716 1716 changesets = clend - clstart
1717 1717 for c in xrange(clstart, clend):
1718 1718 efiles.update(self[c].files())
1719 1719 efiles = len(efiles)
1720 1720 self.ui.progress(_('changesets'), None)
1721 1721
1722 1722 # pull off the manifest group
1723 1723 self.ui.status(_("adding manifests\n"))
1724 1724 pr.step = _('manifests')
1725 1725 pr.count = 1
1726 1726 pr.total = changesets # manifests <= changesets
1727 1727 # no need to check for empty manifest group here:
1728 1728 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1729 1729 # no new manifest will be created and the manifest group will
1730 1730 # be empty during the pull
1731 1731 self.manifest.addgroup(source, revmap, trp)
1732 1732 self.ui.progress(_('manifests'), None)
1733 1733
1734 1734 needfiles = {}
1735 1735 if self.ui.configbool('server', 'validate', default=False):
1736 1736 # validate incoming csets have their manifests
1737 1737 for cset in xrange(clstart, clend):
1738 1738 mfest = self.changelog.read(self.changelog.node(cset))[0]
1739 1739 mfest = self.manifest.readdelta(mfest)
1740 1740 # store file nodes we must see
1741 1741 for f, n in mfest.iteritems():
1742 1742 needfiles.setdefault(f, set()).add(n)
1743 1743
1744 1744 # process the files
1745 1745 self.ui.status(_("adding file changes\n"))
1746 1746 pr.step = 'files'
1747 1747 pr.count = 1
1748 1748 pr.total = efiles
1749 1749 source.callback = None
1750 1750
1751 1751 while True:
1752 1752 f = source.chunk()
1753 1753 if not f:
1754 1754 break
1755 1755 self.ui.debug("adding %s revisions\n" % f)
1756 1756 pr()
1757 1757 fl = self.file(f)
1758 1758 o = len(fl)
1759 1759 if fl.addgroup(source, revmap, trp) is None:
1760 1760 raise util.Abort(_("received file revlog group is empty"))
1761 1761 revisions += len(fl) - o
1762 1762 files += 1
1763 1763 if f in needfiles:
1764 1764 needs = needfiles[f]
1765 1765 for new in xrange(o, len(fl)):
1766 1766 n = fl.node(new)
1767 1767 if n in needs:
1768 1768 needs.remove(n)
1769 1769 if not needs:
1770 1770 del needfiles[f]
1771 1771 self.ui.progress(_('files'), None)
1772 1772
1773 1773 for f, needs in needfiles.iteritems():
1774 1774 fl = self.file(f)
1775 1775 for n in needs:
1776 1776 try:
1777 1777 fl.rev(n)
1778 1778 except error.LookupError:
1779 1779 raise util.Abort(
1780 1780 _('missing file data for %s:%s - run hg verify') %
1781 1781 (f, hex(n)))
1782 1782
1783 1783 newheads = len(cl.heads())
1784 1784 heads = ""
1785 1785 if oldheads and newheads != oldheads:
1786 1786 heads = _(" (%+d heads)") % (newheads - oldheads)
1787 1787
1788 1788 self.ui.status(_("added %d changesets"
1789 1789 " with %d changes to %d files%s\n")
1790 1790 % (changesets, revisions, files, heads))
1791 1791
1792 1792 if changesets > 0:
1793 1793 p = lambda: cl.writepending() and self.root or ""
1794 1794 self.hook('pretxnchangegroup', throw=True,
1795 1795 node=hex(cl.node(clstart)), source=srctype,
1796 1796 url=url, pending=p)
1797 1797
1798 1798 # make changelog see real files again
1799 1799 cl.finalize(trp)
1800 1800
1801 1801 tr.close()
1802 1802 finally:
1803 1803 tr.release()
1804 1804 if lock:
1805 1805 lock.release()
1806 1806
1807 1807 if changesets > 0:
1808 1808 # forcefully update the on-disk branch cache
1809 1809 self.ui.debug("updating the branch cache\n")
1810 1810 self.updatebranchcache()
1811 1811 self.hook("changegroup", node=hex(cl.node(clstart)),
1812 1812 source=srctype, url=url)
1813 1813
1814 1814 for i in xrange(clstart, clend):
1815 1815 self.hook("incoming", node=hex(cl.node(i)),
1816 1816 source=srctype, url=url)
1817 1817
1818 1818 # never return 0 here:
1819 1819 if newheads < oldheads:
1820 1820 return newheads - oldheads - 1
1821 1821 else:
1822 1822 return newheads - oldheads + 1
1823 1823
1824 1824
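Since the head-count encoding documented in addchangegroup()'s docstring is easy to misread, here is how a hypothetical caller could decode it; describeresult is illustrative, not part of this module:

    def describeresult(ret):
        # follows the addchangegroup() return contract above
        if ret == 0:
            return "nothing changed"
        if ret == 1:
            return "heads unchanged"
        if ret > 1:
            return "%d heads added" % (ret - 1)
        return "%d heads removed" % (-ret - 1)
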
1825 1825 def stream_in(self, remote, requirements):
1826 1826 lock = self.lock()
1827 1827 try:
1828 1828 fp = remote.stream_out()
1829 1829 l = fp.readline()
1830 1830 try:
1831 1831 resp = int(l)
1832 1832 except ValueError:
1833 1833 raise error.ResponseError(
1834 1834 _('Unexpected response from remote server:'), l)
1835 1835 if resp == 1:
1836 1836 raise util.Abort(_('operation forbidden by server'))
1837 1837 elif resp == 2:
1838 1838 raise util.Abort(_('locking the remote repository failed'))
1839 1839 elif resp != 0:
1840 1840 raise util.Abort(_('the server sent an unknown error code'))
1841 1841 self.ui.status(_('streaming all changes\n'))
1842 1842 l = fp.readline()
1843 1843 try:
1844 1844 total_files, total_bytes = map(int, l.split(' ', 1))
1845 1845 except (ValueError, TypeError):
1846 1846 raise error.ResponseError(
1847 1847 _('Unexpected response from remote server:'), l)
1848 1848 self.ui.status(_('%d files to transfer, %s of data\n') %
1849 1849 (total_files, util.bytecount(total_bytes)))
1850 1850 start = time.time()
1851 1851 for i in xrange(total_files):
1852 1852 # XXX doesn't support '\n' or '\r' in filenames
1853 1853 l = fp.readline()
1854 1854 try:
1855 1855 name, size = l.split('\0', 1)
1856 1856 size = int(size)
1857 1857 except (ValueError, TypeError):
1858 1858 raise error.ResponseError(
1859 1859 _('Unexpected response from remote server:'), l)
1860 1860 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1861 1861 # for backwards compat, name was partially encoded
1862 1862 ofp = self.sopener(store.decodedir(name), 'w')
1863 1863 for chunk in util.filechunkiter(fp, limit=size):
1864 1864 ofp.write(chunk)
1865 1865 ofp.close()
1866 1866 elapsed = time.time() - start
1867 1867 if elapsed <= 0:
1868 1868 elapsed = 0.001
1869 1869 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1870 1870 (util.bytecount(total_bytes), elapsed,
1871 1871 util.bytecount(total_bytes / elapsed)))
1872 1872
1873 1873 # new requirements = old non-format requirements + new format-related
1874 1874 # requirements from the streamed-in repository
1875 1875 requirements.update(set(self.requirements) - self.supportedformats)
1876 1876 self._applyrequirements(requirements)
1877 1877 self._writerequirements()
1878 1878
1879 1879 self.invalidate()
1880 1880 return len(self.heads()) + 1
1881 1881 finally:
1882 1882 lock.release()
1883 1883
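stream_in() above pins down the wire format it expects: one line holding an integer response code, one line holding 'total_files total_bytes', then for each file a 'name\0size' line followed by exactly size bytes of raw store data. A stripped-down reader under those assumptions; readstream is an illustrative name and error handling is omitted:

    def readstream(fp):
        if int(fp.readline()) != 0:
            raise ValueError('server refused the stream')
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))   # raw store bytes for this file
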
1884 1884 def clone(self, remote, heads=[], stream=False):
1885 1885 '''clone remote repository.
1886 1886
1887 1887 keyword arguments:
1888 1888 heads: list of revs to clone (forces use of pull)
1889 1889 stream: use streaming clone if possible'''
1890 1890
1891 1891 # now, all clients that can request uncompressed clones can
1892 1892 # read repo formats supported by all servers that can serve
1893 1893 # them.
1894 1894
1895 1895 # if revlog format changes, client will have to check version
1896 1896 # and format flags on "stream" capability, and use
1897 1897 # uncompressed only if compatible.
1898 1898
1899 1899 if stream and not heads:
1900 1900 # 'stream' means remote revlog format is revlogv1 only
1901 1901 if remote.capable('stream'):
1902 1902 return self.stream_in(remote, set(('revlogv1',)))
1903 1903 # otherwise, 'streamreqs' contains the remote revlog format
1904 1904 streamreqs = remote.capable('streamreqs')
1905 1905 if streamreqs:
1906 1906 streamreqs = set(streamreqs.split(','))
1907 1907 # if we support it, stream in and adjust our requirements
1908 1908 if not streamreqs - self.supportedformats:
1909 1909 return self.stream_in(remote, streamreqs)
1910 1910 return self.pull(remote, heads)
1911 1911
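The streamreqs check in clone() is plain set arithmetic: stream in only when the server's advertised format requirements are a subset of this client's supportedformats. A worked example with made-up requirement names:

    supported = set(['revlogv1', 'parentdelta'])
    streamreqs = set('revlogv1,generaldelta'.split(','))
    missing = streamreqs - supported
    if not missing:
        print 'stream clone is safe'
    else:
        # 'generaldelta' is not supported here, so fall back to pull()
        print 'falling back to pull, unsupported: %s' % ', '.join(missing)
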
1912 1912 def pushkey(self, namespace, key, old, new):
1913 1913 return pushkey.push(self, namespace, key, old, new)
1914 1914
1915 1915 def listkeys(self, namespace):
1916 1916 return pushkey.list(self, namespace)
1917 1917
1918 1918 def debugwireargs(self, one, two, three=None, four=None):
1919 1919 '''used to test argument passing over the wire'''
1920 1920 return "%s %s %s %s" % (one, two, three, four)
1921 1921
1922 1922 # used to avoid circular references so destructors work
1923 1923 def aftertrans(files):
1924 1924 renamefiles = [tuple(t) for t in files]
1925 1925 def a():
1926 1926 for src, dest in renamefiles:
1927 1927 util.rename(src, dest)
1928 1928 return a
1929 1929
1930 1930 def instance(ui, path, create):
1931 1931 return localrepository(ui, urlmod.localpath(path), create)
1932 1932
1933 1933 def islocal(path):
1934 1934 return True