localrepo: ignore tags to unknown nodes (issue2750)
Idan Kamara
r13892:31d15f76 default
@@ -1,1932 +1,1937 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'parentdelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=0):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = util.path_auditor(self.root, self._checknested)
35 35 self.opener = util.opener(self.path)
36 36 self.wopener = util.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener("00changelog.i", "a").write(
61 61 '\0\0\0\2' # represents revlogv2
62 62 ' dummy changelog to prevent using the old repo layout'
63 63 )
64 64 if self.ui.configbool('format', 'parentdelta', False):
65 65 requirements.append("parentdelta")
66 66 else:
67 67 raise error.RepoError(_("repository %s not found") % path)
68 68 elif create:
69 69 raise error.RepoError(_("repository %s already exists") % path)
70 70 else:
71 71 # find requirements
72 72 requirements = set()
73 73 try:
74 74 requirements = set(self.opener("requires").read().splitlines())
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 for r in requirements - self.supported:
79 79 raise error.RequirementError(
80 80 _("requirement '%s' not supported") % r)
81 81
82 82 self.sharedpath = self.path
83 83 try:
84 84 s = os.path.realpath(self.opener("sharedpath").read())
85 85 if not os.path.exists(s):
86 86 raise error.RepoError(
87 87 _('.hg/sharedpath points to nonexistent directory %s') % s)
88 88 self.sharedpath = s
89 89 except IOError, inst:
90 90 if inst.errno != errno.ENOENT:
91 91 raise
92 92
93 93 self.store = store.store(requirements, self.sharedpath, util.opener)
94 94 self.spath = self.store.path
95 95 self.sopener = self.store.opener
96 96 self.sjoin = self.store.join
97 97 self.opener.createmode = self.store.createmode
98 98 self._applyrequirements(requirements)
99 99 if create:
100 100 self._writerequirements()
101 101
102 102 # These two define the set of tags for this repository. _tags
103 103 # maps tag name to node; _tagtypes maps tag name to 'global' or
104 104 # 'local'. (Global tags are defined by .hgtags across all
105 105 # heads, and local tags are defined in .hg/localtags.) They
106 106 # constitute the in-memory cache of tags.
107 107 self._tags = None
108 108 self._tagtypes = None
109 109
110 110 self._branchcache = None
111 111 self._branchcachetip = None
112 112 self.nodetagscache = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 def _applyrequirements(self, requirements):
118 118 self.requirements = requirements
119 119 self.sopener.options = {}
120 120 if 'parentdelta' in requirements:
121 121 self.sopener.options['parentdelta'] = 1
122 122
123 123 def _writerequirements(self):
124 124 reqfile = self.opener("requires", "w")
125 125 for r in self.requirements:
126 126 reqfile.write("%s\n" % r)
127 127 reqfile.close()
128 128
129 129 def _checknested(self, path):
130 130 """Determine if path is a legal nested repository."""
131 131 if not path.startswith(self.root):
132 132 return False
133 133 subpath = path[len(self.root) + 1:]
134 134
135 135 # XXX: Checking against the current working copy is wrong in
136 136 # the sense that it can reject things like
137 137 #
138 138 # $ hg cat -r 10 sub/x.txt
139 139 #
140 140 # if sub/ is no longer a subrepository in the working copy
141 141 # parent revision.
142 142 #
143 143 # However, it can of course also allow things that would have
144 144 # been rejected before, such as the above cat command if sub/
145 145 # is a subrepository now, but was a normal directory before.
146 146 # The old path auditor would have rejected by mistake since it
147 147 # panics when it sees sub/.hg/.
148 148 #
149 149 # All in all, checking against the working copy seems sensible
150 150 # since we want to prevent access to nested repositories on
151 151 # the filesystem *now*.
152 152 ctx = self[None]
153 153 parts = util.splitpath(subpath)
154 154 while parts:
155 155 prefix = os.sep.join(parts)
156 156 if prefix in ctx.substate:
157 157 if prefix == subpath:
158 158 return True
159 159 else:
160 160 sub = ctx.sub(prefix)
161 161 return sub.checknested(subpath[len(prefix) + 1:])
162 162 else:
163 163 parts.pop()
164 164 return False
165 165
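The prefix walk in _checknested can be illustrated standalone: starting from the full subpath, drop trailing components until a registered subrepo prefix matches. A minimal sketch, assuming a plain dict in place of ctx.substate and '/'-separated paths:

    def checknested_sketch(subpath, substate):
        # substate: subrepo prefix -> state, standing in for ctx.substate
        parts = subpath.split('/')
        while parts:
            prefix = '/'.join(parts)
            if prefix in substate:
                # exact match: subpath is itself a subrepo root; otherwise
                # the real code recurses via ctx.sub(prefix).checknested()
                return prefix == subpath
            parts.pop()
        return False

    print(checknested_sketch('sub', {'sub': '...'}))        # True
    print(checknested_sketch('other/x.txt', {'sub': '...'}))  # False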
166 166 @util.propertycache
167 167 def _bookmarks(self):
168 168 return bookmarks.read(self)
169 169
170 170 @util.propertycache
171 171 def _bookmarkcurrent(self):
172 172 return bookmarks.readcurrent(self)
173 173
174 174 @propertycache
175 175 def changelog(self):
176 176 c = changelog.changelog(self.sopener)
177 177 if 'HG_PENDING' in os.environ:
178 178 p = os.environ['HG_PENDING']
179 179 if p.startswith(self.root):
180 180 c.readpending('00changelog.i.a')
181 181 self.sopener.options['defversion'] = c.version
182 182 return c
183 183
184 184 @propertycache
185 185 def manifest(self):
186 186 return manifest.manifest(self.sopener)
187 187
188 188 @propertycache
189 189 def dirstate(self):
190 190 warned = [0]
191 191 def validate(node):
192 192 try:
193 193 r = self.changelog.rev(node)
194 194 return node
195 195 except error.LookupError:
196 196 if not warned[0]:
197 197 warned[0] = True
198 198 self.ui.warn(_("warning: ignoring unknown"
199 199 " working parent %s!\n") % short(node))
200 200 return nullid
201 201
202 202 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
203 203
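The validate callback above quietly repairs a dirstate whose parent no longer exists in the changelog, warning only once. The warn-once pattern, sketched standalone (the string 'null' stands in for nullid):

    def make_validator(known_nodes, warn):
        warned = [False]                  # mutable cell, like warned[0] above
        def validate(node):
            if node in known_nodes:
                return node
            if not warned[0]:
                warned[0] = True
                warn("warning: ignoring unknown working parent %s!\n" % node)
            return 'null'                 # nullid in the real code
        return validate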
204 204 def __getitem__(self, changeid):
205 205 if changeid is None:
206 206 return context.workingctx(self)
207 207 return context.changectx(self, changeid)
208 208
209 209 def __contains__(self, changeid):
210 210 try:
211 211 return bool(self.lookup(changeid))
212 212 except error.RepoLookupError:
213 213 return False
214 214
215 215 def __nonzero__(self):
216 216 return True
217 217
218 218 def __len__(self):
219 219 return len(self.changelog)
220 220
221 221 def __iter__(self):
222 222 for i in xrange(len(self)):
223 223 yield i
224 224
225 225 def url(self):
226 226 return 'file:' + self.root
227 227
228 228 def hook(self, name, throw=False, **args):
229 229 return hook.hook(self.ui, self, name, throw, **args)
230 230
231 231 tag_disallowed = ':\r\n'
232 232
233 233 def _tag(self, names, node, message, local, user, date, extra={}):
234 234 if isinstance(names, str):
235 235 allchars = names
236 236 names = (names,)
237 237 else:
238 238 allchars = ''.join(names)
239 239 for c in self.tag_disallowed:
240 240 if c in allchars:
241 241 raise util.Abort(_('%r cannot be used in a tag name') % c)
242 242
243 243 branches = self.branchmap()
244 244 for name in names:
245 245 self.hook('pretag', throw=True, node=hex(node), tag=name,
246 246 local=local)
247 247 if name in branches:
248 248 self.ui.warn(_("warning: tag %s conflicts with existing"
249 249 " branch name\n") % name)
250 250
251 251 def writetags(fp, names, munge, prevtags):
252 252 fp.seek(0, 2)
253 253 if prevtags and prevtags[-1] != '\n':
254 254 fp.write('\n')
255 255 for name in names:
256 256 m = munge and munge(name) or name
257 257 if self._tagtypes and name in self._tagtypes:
258 258 old = self._tags.get(name, nullid)
259 259 fp.write('%s %s\n' % (hex(old), m))
260 260 fp.write('%s %s\n' % (hex(node), m))
261 261 fp.close()
262 262
263 263 prevtags = ''
264 264 if local:
265 265 try:
266 266 fp = self.opener('localtags', 'r+')
267 267 except IOError:
268 268 fp = self.opener('localtags', 'a')
269 269 else:
270 270 prevtags = fp.read()
271 271
272 272 # local tags are stored in the current charset
273 273 writetags(fp, names, None, prevtags)
274 274 for name in names:
275 275 self.hook('tag', node=hex(node), tag=name, local=local)
276 276 return
277 277
278 278 try:
279 279 fp = self.wfile('.hgtags', 'rb+')
280 280 except IOError:
281 281 fp = self.wfile('.hgtags', 'ab')
282 282 else:
283 283 prevtags = fp.read()
284 284
285 285 # committed tags are stored in UTF-8
286 286 writetags(fp, names, encoding.fromlocal, prevtags)
287 287
288 288 fp.close()
289 289
290 290 if '.hgtags' not in self.dirstate:
291 291 self[None].add(['.hgtags'])
292 292
293 293 m = matchmod.exact(self.root, '', ['.hgtags'])
294 294 tagnode = self.commit(message, user, date, extra=extra, match=m)
295 295
296 296 for name in names:
297 297 self.hook('tag', node=hex(node), tag=name, local=local)
298 298
299 299 return tagnode
300 300
301 301 def tag(self, names, node, message, local, user, date):
302 302 '''tag a revision with one or more symbolic names.
303 303
304 304 names is a list of strings or, when adding a single tag, names may be a
305 305 string.
306 306
307 307 if local is True, the tags are stored in a per-repository file.
308 308 otherwise, they are stored in the .hgtags file, and a new
309 309 changeset is committed with the change.
310 310
311 311 keyword arguments:
312 312
313 313 local: whether to store tags in non-version-controlled file
314 314 (default False)
315 315
316 316 message: commit message to use if committing
317 317
318 318 user: name of user to use if committing
319 319
320 320 date: date tuple to use if committing'''
321 321
322 322 if not local:
323 323 for x in self.status()[:5]:
324 324 if '.hgtags' in x:
325 325 raise util.Abort(_('working copy of .hgtags is changed '
326 326 '(please commit .hgtags manually)'))
327 327
328 328 self.tags() # instantiate the cache
329 329 self._tag(names, node, message, local, user, date)
330 330
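For orientation, a hedged sketch of driving this entry point; the repository path and tag names are assumptions, and the calls need a real repository to run, so they are left commented:

    # from mercurial import hg, ui as uimod
    # repo = hg.repository(uimod.ui(), '.')
    # node = repo.lookup('tip')
    # # local tag: appended to .hg/localtags, no changeset created
    # repo.tag('scratch', node, '', True, None, None)
    # # global tag: .hgtags is updated and a changeset is committed
    # repo.tag(['v1.0'], node, 'Added tag v1.0', False, 'user <u@example.com>', None)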
331 331 def tags(self):
332 332 '''return a mapping of tag to node'''
333 333 if self._tags is None:
334 334 (self._tags, self._tagtypes) = self._findtags()
335 335
336 336 return self._tags
337 337
338 338 def _findtags(self):
339 339 '''Do the hard work of finding tags. Return a pair of dicts
340 340 (tags, tagtypes) where tags maps tag name to node, and tagtypes
341 341 maps tag name to a string like \'global\' or \'local\'.
342 342 Subclasses or extensions are free to add their own tags, but
343 343 should be aware that the returned dicts will be retained for the
344 344 duration of the localrepo object.'''
345 345
346 346 # XXX what tagtype should subclasses/extensions use? Currently
347 347 # mq and bookmarks add tags, but do not set the tagtype at all.
348 348 # Should each extension invent its own tag type? Should there
349 349 # be one tagtype for all such "virtual" tags? Or is the status
350 350 # quo fine?
351 351
352 352 alltags = {} # map tag name to (node, hist)
353 353 tagtypes = {}
354 354
355 355 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
356 356 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
357 357
358 358 # Build the return dicts. Have to re-encode tag names because
359 359 # the tags module always uses UTF-8 (in order not to lose info
360 360 # writing to the cache), but the rest of Mercurial wants them in
361 361 # local encoding.
362 362 tags = {}
363 363 for (name, (node, hist)) in alltags.iteritems():
364 364 if node != nullid:
365 tags[encoding.tolocal(name)] = node
365 try:
366 # ignore tags to unknown nodes
367 self.changelog.lookup(node)
368 tags[encoding.tolocal(name)] = node
369 except error.LookupError:
370 pass
366 371 tags['tip'] = self.changelog.tip()
367 372 tagtypes = dict([(encoding.tolocal(name), value)
368 373 for (name, value) in tagtypes.iteritems()])
369 374 return (tags, tagtypes)
370 375
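This is the change of this revision: a tag whose node is missing from the changelog (typically after a strip) is now skipped rather than left to break later lookups. The new filtering step, mirrored standalone with plain dicts in place of changelog.lookup():

    def filter_tags(alltags, changelog_nodes):
        # alltags: name -> (node, hist), as built by findglobaltags/readlocaltags
        tags = {}
        for name, (node, hist) in alltags.items():
            if node == 'null':               # nullid in the real code
                continue
            if node not in changelog_nodes:  # changelog.lookup() raises LookupError
                continue                     # ignore tags to unknown nodes (issue2750)
            tags[name] = node
        return tags

    print(filter_tags({'good': ('n1', []), 'stale': ('n-stripped', [])}, {'n1'}))
    # -> {'good': 'n1'}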
371 376 def tagtype(self, tagname):
372 377 '''
373 378 return the type of the given tag. result can be:
374 379
375 380 'local' : a local tag
376 381 'global' : a global tag
377 382 None : tag does not exist
378 383 '''
379 384
380 385 self.tags()
381 386
382 387 return self._tagtypes.get(tagname)
383 388
384 389 def tagslist(self):
385 390 '''return a list of tags ordered by revision'''
386 391 l = []
387 392 for t, n in self.tags().iteritems():
388 393 try:
389 394 r = self.changelog.rev(n)
390 395 except error.LookupError:
391 396 r = -2 # sort to the beginning of the list if unknown
392 397 l.append((r, t, n))
393 398 return [(t, n) for r, t, n in sorted(l)]
394 399
395 400 def nodetags(self, node):
396 401 '''return the tags associated with a node'''
397 402 if not self.nodetagscache:
398 403 self.nodetagscache = {}
399 404 for t, n in self.tags().iteritems():
400 405 self.nodetagscache.setdefault(n, []).append(t)
401 406 for tags in self.nodetagscache.itervalues():
402 407 tags.sort()
403 408 return self.nodetagscache.get(node, [])
404 409
405 410 def nodebookmarks(self, node):
406 411 marks = []
407 412 for bookmark, n in self._bookmarks.iteritems():
408 413 if n == node:
409 414 marks.append(bookmark)
410 415 return sorted(marks)
411 416
412 417 def _branchtags(self, partial, lrev):
413 418 # TODO: rename this function?
414 419 tiprev = len(self) - 1
415 420 if lrev != tiprev:
416 421 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
417 422 self._updatebranchcache(partial, ctxgen)
418 423 self._writebranchcache(partial, self.changelog.tip(), tiprev)
419 424
420 425 return partial
421 426
422 427 def updatebranchcache(self):
423 428 tip = self.changelog.tip()
424 429 if self._branchcache is not None and self._branchcachetip == tip:
425 430 return self._branchcache
426 431
427 432 oldtip = self._branchcachetip
428 433 self._branchcachetip = tip
429 434 if oldtip is None or oldtip not in self.changelog.nodemap:
430 435 partial, last, lrev = self._readbranchcache()
431 436 else:
432 437 lrev = self.changelog.rev(oldtip)
433 438 partial = self._branchcache
434 439
435 440 self._branchtags(partial, lrev)
436 441 # this private cache holds all heads (not just tips)
437 442 self._branchcache = partial
438 443
439 444 def branchmap(self):
440 445 '''returns a dictionary {branch: [branchheads]}'''
441 446 self.updatebranchcache()
442 447 return self._branchcache
443 448
444 449 def branchtags(self):
445 450 '''return a dict where branch names map to the tipmost head of
446 451 the branch, open heads come before closed'''
447 452 bt = {}
448 453 for bn, heads in self.branchmap().iteritems():
449 454 tip = heads[-1]
450 455 for h in reversed(heads):
451 456 if 'close' not in self.changelog.read(h)[5]:
452 457 tip = h
453 458 break
454 459 bt[bn] = tip
455 460 return bt
456 461
457 462 def _readbranchcache(self):
458 463 partial = {}
459 464 try:
460 465 f = self.opener("cache/branchheads")
461 466 lines = f.read().split('\n')
462 467 f.close()
463 468 except (IOError, OSError):
464 469 return {}, nullid, nullrev
465 470
466 471 try:
467 472 last, lrev = lines.pop(0).split(" ", 1)
468 473 last, lrev = bin(last), int(lrev)
469 474 if lrev >= len(self) or self[lrev].node() != last:
470 475 # invalidate the cache
471 476 raise ValueError('invalidating branch cache (tip differs)')
472 477 for l in lines:
473 478 if not l:
474 479 continue
475 480 node, label = l.split(" ", 1)
476 481 label = encoding.tolocal(label.strip())
477 482 partial.setdefault(label, []).append(bin(node))
478 483 except KeyboardInterrupt:
479 484 raise
480 485 except Exception, inst:
481 486 if self.ui.debugflag:
482 487 self.ui.warn(str(inst), '\n')
483 488 partial, last, lrev = {}, nullid, nullrev
484 489 return partial, last, lrev
485 490
486 491 def _writebranchcache(self, branches, tip, tiprev):
487 492 try:
488 493 f = self.opener("cache/branchheads", "w", atomictemp=True)
489 494 f.write("%s %s\n" % (hex(tip), tiprev))
490 495 for label, nodes in branches.iteritems():
491 496 for node in nodes:
492 497 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
493 498 f.rename()
494 499 except (IOError, OSError):
495 500 pass
496 501
497 502 def _updatebranchcache(self, partial, ctxgen):
498 503 # collect new branch entries
499 504 newbranches = {}
500 505 for c in ctxgen:
501 506 newbranches.setdefault(c.branch(), []).append(c.node())
502 507 # if older branchheads are reachable from new ones, they aren't
503 508 # really branchheads. Note checking parents is insufficient:
504 509 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
505 510 for branch, newnodes in newbranches.iteritems():
506 511 bheads = partial.setdefault(branch, [])
507 512 bheads.extend(newnodes)
508 513 if len(bheads) <= 1:
509 514 continue
510 515 # starting from tip means fewer passes over reachable
511 516 while newnodes:
512 517 latest = newnodes.pop()
513 518 if latest not in bheads:
514 519 continue
515 520 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
516 521 reachable = self.changelog.reachable(latest, minbhrev)
517 522 reachable.remove(latest)
518 523 bheads = [b for b in bheads if b not in reachable]
519 524 partial[branch] = bheads
520 525
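The comment above gives the graph 1 (branch a) -> 2 (branch b) -> 3 (branch a): both 1 and 3 carry branch a, but 1 is reachable from 3 and so is not really a head. The pruning idea, sketched over an explicit reachability table instead of changelog.reachable():

    def prune_branch_heads(bheads, reachable):
        # reachable: head -> set of its ancestors (hypothetical table)
        dominated = set()
        for h in bheads:
            for other in bheads:
                if other != h and h in reachable.get(other, set()):
                    dominated.add(h)
        return [h for h in bheads if h not in dominated]

    print(prune_branch_heads([1, 3], {3: {1, 2}}))   # -> [3]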
521 526 def lookup(self, key):
522 527 if isinstance(key, int):
523 528 return self.changelog.node(key)
524 529 elif key == '.':
525 530 return self.dirstate.p1()
526 531 elif key == 'null':
527 532 return nullid
528 533 elif key == 'tip':
529 534 return self.changelog.tip()
530 535 n = self.changelog._match(key)
531 536 if n:
532 537 return n
533 538 if key in self._bookmarks:
534 539 return self._bookmarks[key]
535 540 if key in self.tags():
536 541 return self.tags()[key]
537 542 if key in self.branchtags():
538 543 return self.branchtags()[key]
539 544 n = self.changelog._partialmatch(key)
540 545 if n:
541 546 return n
542 547
543 548 # can't find key, check if it might have come from damaged dirstate
544 549 if key in self.dirstate.parents():
545 550 raise error.Abort(_("working directory has unknown parent '%s'!")
546 551 % short(key))
547 552 try:
548 553 if len(key) == 20:
549 554 key = hex(key)
550 555 except:
551 556 pass
552 557 raise error.RepoLookupError(_("unknown revision '%s'") % key)
553 558
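lookup() tries interpretations in a fixed order: integer rev, the special keys '.', 'null' and 'tip', an exact node or hex match, then bookmarks, tags, branch names, and finally an unambiguous hex prefix. A compressed mirror of that precedence, with the special keys elided and hypothetical lookup tables:

    def lookup_sketch(key, bookmarks, tags, branches, hexnodes):
        for table in (bookmarks, tags, branches):    # same order as above
            if key in table:
                return table[key]
        matches = [n for n in hexnodes if n.startswith(key)]
        if len(matches) == 1:
            return matches[0]                        # _partialmatch equivalent
        raise KeyError("unknown revision '%s'" % key)

    print(lookup_sketch('v1', {}, {'v1': 'abc123'}, {}, []))   # -> 'abc123'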
554 559 def lookupbranch(self, key, remote=None):
555 560 repo = remote or self
556 561 if key in repo.branchmap():
557 562 return key
558 563
559 564 repo = (remote and remote.local()) and remote or self
560 565 return repo[key].branch()
561 566
562 567 def known(self, nodes):
563 568 nm = self.changelog.nodemap
564 569 return [(n in nm) for n in nodes]
565 570
566 571 def local(self):
567 572 return True
568 573
569 574 def join(self, f):
570 575 return os.path.join(self.path, f)
571 576
572 577 def wjoin(self, f):
573 578 return os.path.join(self.root, f)
574 579
575 580 def file(self, f):
576 581 if f[0] == '/':
577 582 f = f[1:]
578 583 return filelog.filelog(self.sopener, f)
579 584
580 585 def changectx(self, changeid):
581 586 return self[changeid]
582 587
583 588 def parents(self, changeid=None):
584 589 '''get list of changectxs for parents of changeid'''
585 590 return self[changeid].parents()
586 591
587 592 def filectx(self, path, changeid=None, fileid=None):
588 593 """changeid can be a changeset revision, node, or tag.
589 594 fileid can be a file revision or node."""
590 595 return context.filectx(self, path, changeid, fileid)
591 596
592 597 def getcwd(self):
593 598 return self.dirstate.getcwd()
594 599
595 600 def pathto(self, f, cwd=None):
596 601 return self.dirstate.pathto(f, cwd)
597 602
598 603 def wfile(self, f, mode='r'):
599 604 return self.wopener(f, mode)
600 605
601 606 def _link(self, f):
602 607 return os.path.islink(self.wjoin(f))
603 608
604 609 def _loadfilter(self, filter):
605 610 if filter not in self.filterpats:
606 611 l = []
607 612 for pat, cmd in self.ui.configitems(filter):
608 613 if cmd == '!':
609 614 continue
610 615 mf = matchmod.match(self.root, '', [pat])
611 616 fn = None
612 617 params = cmd
613 618 for name, filterfn in self._datafilters.iteritems():
614 619 if cmd.startswith(name):
615 620 fn = filterfn
616 621 params = cmd[len(name):].lstrip()
617 622 break
618 623 if not fn:
619 624 fn = lambda s, c, **kwargs: util.filter(s, c)
620 625 # Wrap old filters not supporting keyword arguments
621 626 if not inspect.getargspec(fn)[2]:
622 627 oldfn = fn
623 628 fn = lambda s, c, **kwargs: oldfn(s, c)
624 629 l.append((mf, fn, params))
625 630 self.filterpats[filter] = l
626 631 return self.filterpats[filter]
627 632
628 633 def _filter(self, filterpats, filename, data):
629 634 for mf, fn, cmd in filterpats:
630 635 if mf(filename):
631 636 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
632 637 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
633 638 break
634 639
635 640 return data
636 641
637 642 @propertycache
638 643 def _encodefilterpats(self):
639 644 return self._loadfilter('encode')
640 645
641 646 @propertycache
642 647 def _decodefilterpats(self):
643 648 return self._loadfilter('decode')
644 649
645 650 def adddatafilter(self, name, filter):
646 651 self._datafilters[name] = filter
647 652
648 653 def wread(self, filename):
649 654 if self._link(filename):
650 655 data = os.readlink(self.wjoin(filename))
651 656 else:
652 657 data = self.wopener(filename, 'r').read()
653 658 return self._filter(self._encodefilterpats, filename, data)
654 659
655 660 def wwrite(self, filename, data, flags):
656 661 data = self._filter(self._decodefilterpats, filename, data)
657 662 if 'l' in flags:
658 663 self.wopener.symlink(data, filename)
659 664 else:
660 665 self.wopener(filename, 'w').write(data)
661 666 if 'x' in flags:
662 667 util.set_flags(self.wjoin(filename), False, True)
663 668
664 669 def wwritedata(self, filename, data):
665 670 return self._filter(self._decodefilterpats, filename, data)
666 671
667 672 def transaction(self, desc):
668 673 tr = self._transref and self._transref() or None
669 674 if tr and tr.running():
670 675 return tr.nest()
671 676
672 677 # abort here if the journal already exists
673 678 if os.path.exists(self.sjoin("journal")):
674 679 raise error.RepoError(
675 680 _("abandoned transaction found - run hg recover"))
676 681
677 682 # save dirstate for rollback
678 683 try:
679 684 ds = self.opener("dirstate").read()
680 685 except IOError:
681 686 ds = ""
682 687 self.opener("journal.dirstate", "w").write(ds)
683 688 self.opener("journal.branch", "w").write(
684 689 encoding.fromlocal(self.dirstate.branch()))
685 690 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
686 691
687 692 renames = [(self.sjoin("journal"), self.sjoin("undo")),
688 693 (self.join("journal.dirstate"), self.join("undo.dirstate")),
689 694 (self.join("journal.branch"), self.join("undo.branch")),
690 695 (self.join("journal.desc"), self.join("undo.desc"))]
691 696 tr = transaction.transaction(self.ui.warn, self.sopener,
692 697 self.sjoin("journal"),
693 698 aftertrans(renames),
694 699 self.store.createmode)
695 700 self._transref = weakref.ref(tr)
696 701 return tr
697 702
698 703 def recover(self):
699 704 lock = self.lock()
700 705 try:
701 706 if os.path.exists(self.sjoin("journal")):
702 707 self.ui.status(_("rolling back interrupted transaction\n"))
703 708 transaction.rollback(self.sopener, self.sjoin("journal"),
704 709 self.ui.warn)
705 710 self.invalidate()
706 711 return True
707 712 else:
708 713 self.ui.warn(_("no interrupted transaction available\n"))
709 714 return False
710 715 finally:
711 716 lock.release()
712 717
713 718 def rollback(self, dryrun=False):
714 719 wlock = lock = None
715 720 try:
716 721 wlock = self.wlock()
717 722 lock = self.lock()
718 723 if os.path.exists(self.sjoin("undo")):
719 724 try:
720 725 args = self.opener("undo.desc", "r").read().splitlines()
721 726 if len(args) >= 3 and self.ui.verbose:
722 727 desc = _("repository tip rolled back to revision %s"
723 728 " (undo %s: %s)\n") % (
724 729 int(args[0]) - 1, args[1], args[2])
725 730 elif len(args) >= 2:
726 731 desc = _("repository tip rolled back to revision %s"
727 732 " (undo %s)\n") % (
728 733 int(args[0]) - 1, args[1])
729 734 except IOError:
730 735 desc = _("rolling back unknown transaction\n")
731 736 self.ui.status(desc)
732 737 if dryrun:
733 738 return
734 739 transaction.rollback(self.sopener, self.sjoin("undo"),
735 740 self.ui.warn)
736 741 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
737 742 if os.path.exists(self.join('undo.bookmarks')):
738 743 util.rename(self.join('undo.bookmarks'),
739 744 self.join('bookmarks'))
740 745 try:
741 746 branch = self.opener("undo.branch").read()
742 747 self.dirstate.setbranch(branch)
743 748 except IOError:
744 749 self.ui.warn(_("Named branch could not be reset, "
745 750 "current branch still is: %s\n")
746 751 % self.dirstate.branch())
747 752 self.invalidate()
748 753 self.dirstate.invalidate()
749 754 self.destroyed()
750 755 parents = tuple([p.rev() for p in self.parents()])
751 756 if len(parents) > 1:
752 757 self.ui.status(_("working directory now based on "
753 758 "revisions %d and %d\n") % parents)
754 759 else:
755 760 self.ui.status(_("working directory now based on "
756 761 "revision %d\n") % parents)
757 762 else:
758 763 self.ui.warn(_("no rollback information available\n"))
759 764 return 1
760 765 finally:
761 766 release(lock, wlock)
762 767
763 768 def invalidatecaches(self):
764 769 self._tags = None
765 770 self._tagtypes = None
766 771 self.nodetagscache = None
767 772 self._branchcache = None # in UTF-8
768 773 self._branchcachetip = None
769 774
770 775 def invalidate(self):
771 776 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
772 777 if a in self.__dict__:
773 778 delattr(self, a)
774 779 self.invalidatecaches()
775 780
776 781 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
777 782 try:
778 783 l = lock.lock(lockname, 0, releasefn, desc=desc)
779 784 except error.LockHeld, inst:
780 785 if not wait:
781 786 raise
782 787 self.ui.warn(_("waiting for lock on %s held by %r\n") %
783 788 (desc, inst.locker))
784 789 # default to 600 seconds timeout
785 790 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
786 791 releasefn, desc=desc)
787 792 if acquirefn:
788 793 acquirefn()
789 794 return l
790 795
791 796 def lock(self, wait=True):
792 797 '''Lock the repository store (.hg/store) and return a weak reference
793 798 to the lock. Use this before modifying the store (e.g. committing or
794 799 stripping). If you are opening a transaction, get a lock as well.'''
795 800 l = self._lockref and self._lockref()
796 801 if l is not None and l.held:
797 802 l.lock()
798 803 return l
799 804
800 805 l = self._lock(self.sjoin("lock"), wait, self.store.write,
801 806 self.invalidate, _('repository %s') % self.origroot)
802 807 self._lockref = weakref.ref(l)
803 808 return l
804 809
805 810 def wlock(self, wait=True):
806 811 '''Lock the non-store parts of the repository (everything under
807 812 .hg except .hg/store) and return a weak reference to the lock.
808 813 Use this before modifying files in .hg.'''
809 814 l = self._wlockref and self._wlockref()
810 815 if l is not None and l.held:
811 816 l.lock()
812 817 return l
813 818
814 819 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
815 820 self.dirstate.invalidate, _('working directory of %s') %
816 821 self.origroot)
817 822 self._wlockref = weakref.ref(l)
818 823 return l
819 824
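Lock ordering matters here: callers that touch both areas (commit, rollback) take wlock first, then lock, and release in the reverse order. A hedged usage sketch, assuming repo is an open localrepository, left commented since it needs a real repository:

    # wlock = lock = None
    # try:
    #     wlock = repo.wlock()   # guards dirstate and other non-store files
    #     lock = repo.lock()     # guards .hg/store
    #     ...                    # mutate the store, then the working state
    # finally:
    #     release(lock, wlock)   # 'release' imported from lock at the top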
820 825 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
821 826 """
822 827 commit an individual file as part of a larger transaction
823 828 """
824 829
825 830 fname = fctx.path()
826 831 text = fctx.data()
827 832 flog = self.file(fname)
828 833 fparent1 = manifest1.get(fname, nullid)
829 834 fparent2 = fparent2o = manifest2.get(fname, nullid)
830 835
831 836 meta = {}
832 837 copy = fctx.renamed()
833 838 if copy and copy[0] != fname:
834 839 # Mark the new revision of this file as a copy of another
835 840 # file. This copy data will effectively act as a parent
836 841 # of this new revision. If this is a merge, the first
837 842 # parent will be the nullid (meaning "look up the copy data")
838 843 # and the second one will be the other parent. For example:
839 844 #
840 845 # 0 --- 1 --- 3 rev1 changes file foo
841 846 # \ / rev2 renames foo to bar and changes it
842 847 # \- 2 -/ rev3 should have bar with all changes and
843 848 # should record that bar descends from
844 849 # bar in rev2 and foo in rev1
845 850 #
846 851 # this allows this merge to succeed:
847 852 #
848 853 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
849 854 # \ / merging rev3 and rev4 should use bar@rev2
850 855 # \- 2 --- 4 as the merge base
851 856 #
852 857
853 858 cfname = copy[0]
854 859 crev = manifest1.get(cfname)
855 860 newfparent = fparent2
856 861
857 862 if manifest2: # branch merge
858 863 if fparent2 == nullid or crev is None: # copied on remote side
859 864 if cfname in manifest2:
860 865 crev = manifest2[cfname]
861 866 newfparent = fparent1
862 867
863 868 # find source in nearest ancestor if we've lost track
864 869 if not crev:
865 870 self.ui.debug(" %s: searching for copy revision for %s\n" %
866 871 (fname, cfname))
867 872 for ancestor in self[None].ancestors():
868 873 if cfname in ancestor:
869 874 crev = ancestor[cfname].filenode()
870 875 break
871 876
872 877 if crev:
873 878 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
874 879 meta["copy"] = cfname
875 880 meta["copyrev"] = hex(crev)
876 881 fparent1, fparent2 = nullid, newfparent
877 882 else:
878 883 self.ui.warn(_("warning: can't find ancestor for '%s' "
879 884 "copied from '%s'!\n") % (fname, cfname))
880 885
881 886 elif fparent2 != nullid:
882 887 # is one parent an ancestor of the other?
883 888 fparentancestor = flog.ancestor(fparent1, fparent2)
884 889 if fparentancestor == fparent1:
885 890 fparent1, fparent2 = fparent2, nullid
886 891 elif fparentancestor == fparent2:
887 892 fparent2 = nullid
888 893
889 894 # is the file changed?
890 895 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
891 896 changelist.append(fname)
892 897 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
893 898
894 899 # are just the flags changed during merge?
895 900 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
896 901 changelist.append(fname)
897 902
898 903 return fparent1
899 904
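The parent normalization near the end (fparentancestor) ensures a file revision never lists a parent that is an ancestor of the other parent. Mirrored standalone, with an ancestor() callback and None standing in for nullid:

    def normalize_fparents(fp1, fp2, ancestor):
        a = ancestor(fp1, fp2)
        if a == fp1:            # fp1 is an ancestor of fp2: keep fp2 alone
            return fp2, None
        if a == fp2:            # fp2 is an ancestor of fp1: keep fp1 alone
            return fp1, None
        return fp1, fp2         # genuine merge of two lines of history

    print(normalize_fparents('old', 'new', lambda a, b: 'old'))  # -> ('new', None)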
900 905 def commit(self, text="", user=None, date=None, match=None, force=False,
901 906 editor=False, extra={}):
902 907 """Add a new revision to current repository.
903 908
904 909 Revision information is gathered from the working directory,
905 910 match can be used to filter the committed files. If editor is
906 911 supplied, it is called to get a commit message.
907 912 """
908 913
909 914 def fail(f, msg):
910 915 raise util.Abort('%s: %s' % (f, msg))
911 916
912 917 if not match:
913 918 match = matchmod.always(self.root, '')
914 919
915 920 if not force:
916 921 vdirs = []
917 922 match.dir = vdirs.append
918 923 match.bad = fail
919 924
920 925 wlock = self.wlock()
921 926 try:
922 927 wctx = self[None]
923 928 merge = len(wctx.parents()) > 1
924 929
925 930 if (not force and merge and match and
926 931 (match.files() or match.anypats())):
927 932 raise util.Abort(_('cannot partially commit a merge '
928 933 '(do not specify files or patterns)'))
929 934
930 935 changes = self.status(match=match, clean=force)
931 936 if force:
932 937 changes[0].extend(changes[6]) # mq may commit unchanged files
933 938
934 939 # check subrepos
935 940 subs = []
936 941 removedsubs = set()
937 942 for p in wctx.parents():
938 943 removedsubs.update(s for s in p.substate if match(s))
939 944 for s in wctx.substate:
940 945 removedsubs.discard(s)
941 946 if match(s) and wctx.sub(s).dirty():
942 947 subs.append(s)
943 948 if (subs or removedsubs):
944 949 if (not match('.hgsub') and
945 950 '.hgsub' in (wctx.modified() + wctx.added())):
946 951 raise util.Abort(_("can't commit subrepos without .hgsub"))
947 952 if '.hgsubstate' not in changes[0]:
948 953 changes[0].insert(0, '.hgsubstate')
949 954
950 955 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
951 956 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
952 957 if changedsubs:
953 958 raise util.Abort(_("uncommitted changes in subrepo %s")
954 959 % changedsubs[0])
955 960
956 961 # make sure all explicit patterns are matched
957 962 if not force and match.files():
958 963 matched = set(changes[0] + changes[1] + changes[2])
959 964
960 965 for f in match.files():
961 966 if f == '.' or f in matched or f in wctx.substate:
962 967 continue
963 968 if f in changes[3]: # missing
964 969 fail(f, _('file not found!'))
965 970 if f in vdirs: # visited directory
966 971 d = f + '/'
967 972 for mf in matched:
968 973 if mf.startswith(d):
969 974 break
970 975 else:
971 976 fail(f, _("no match under directory!"))
972 977 elif f not in self.dirstate:
973 978 fail(f, _("file not tracked!"))
974 979
975 980 if (not force and not extra.get("close") and not merge
976 981 and not (changes[0] or changes[1] or changes[2])
977 982 and wctx.branch() == wctx.p1().branch()):
978 983 return None
979 984
980 985 ms = mergemod.mergestate(self)
981 986 for f in changes[0]:
982 987 if f in ms and ms[f] == 'u':
983 988 raise util.Abort(_("unresolved merge conflicts "
984 989 "(see hg help resolve)"))
985 990
986 991 cctx = context.workingctx(self, text, user, date, extra, changes)
987 992 if editor:
988 993 cctx._text = editor(self, cctx, subs)
989 994 edited = (text != cctx._text)
990 995
991 996 # commit subs
992 997 if subs or removedsubs:
993 998 state = wctx.substate.copy()
994 999 for s in sorted(subs):
995 1000 sub = wctx.sub(s)
996 1001 self.ui.status(_('committing subrepository %s\n') %
997 1002 subrepo.subrelpath(sub))
998 1003 sr = sub.commit(cctx._text, user, date)
999 1004 state[s] = (state[s][0], sr)
1000 1005 subrepo.writestate(self, state)
1001 1006
1002 1007 # Save commit message in case this transaction gets rolled back
1003 1008 # (e.g. by a pretxncommit hook). Leave the content alone on
1004 1009 # the assumption that the user will use the same editor again.
1005 1010 msgfile = self.opener('last-message.txt', 'wb')
1006 1011 msgfile.write(cctx._text)
1007 1012 msgfile.close()
1008 1013
1009 1014 p1, p2 = self.dirstate.parents()
1010 1015 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1011 1016 try:
1012 1017 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1013 1018 ret = self.commitctx(cctx, True)
1014 1019 except:
1015 1020 if edited:
1016 1021 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1017 1022 self.ui.write(
1018 1023 _('note: commit message saved in %s\n') % msgfn)
1019 1024 raise
1020 1025
1021 1026 # update bookmarks, dirstate and mergestate
1022 1027 bookmarks.update(self, p1, ret)
1023 1028 for f in changes[0] + changes[1]:
1024 1029 self.dirstate.normal(f)
1025 1030 for f in changes[2]:
1026 1031 self.dirstate.forget(f)
1027 1032 self.dirstate.setparents(ret)
1028 1033 ms.reset()
1029 1034 finally:
1030 1035 wlock.release()
1031 1036
1032 1037 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1033 1038 return ret
1034 1039
1035 1040 def commitctx(self, ctx, error=False):
1036 1041 """Add a new revision to current repository.
1037 1042 Revision information is passed via the context argument.
1038 1043 """
1039 1044
1040 1045 tr = lock = None
1041 1046 removed = list(ctx.removed())
1042 1047 p1, p2 = ctx.p1(), ctx.p2()
1043 1048 m1 = p1.manifest().copy()
1044 1049 m2 = p2.manifest()
1045 1050 user = ctx.user()
1046 1051
1047 1052 lock = self.lock()
1048 1053 try:
1049 1054 tr = self.transaction("commit")
1050 1055 trp = weakref.proxy(tr)
1051 1056
1052 1057 # check in files
1053 1058 new = {}
1054 1059 changed = []
1055 1060 linkrev = len(self)
1056 1061 for f in sorted(ctx.modified() + ctx.added()):
1057 1062 self.ui.note(f + "\n")
1058 1063 try:
1059 1064 fctx = ctx[f]
1060 1065 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1061 1066 changed)
1062 1067 m1.set(f, fctx.flags())
1063 1068 except OSError, inst:
1064 1069 self.ui.warn(_("trouble committing %s!\n") % f)
1065 1070 raise
1066 1071 except IOError, inst:
1067 1072 errcode = getattr(inst, 'errno', errno.ENOENT)
1068 1073 if error or errcode and errcode != errno.ENOENT:
1069 1074 self.ui.warn(_("trouble committing %s!\n") % f)
1070 1075 raise
1071 1076 else:
1072 1077 removed.append(f)
1073 1078
1074 1079 # update manifest
1075 1080 m1.update(new)
1076 1081 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1077 1082 drop = [f for f in removed if f in m1]
1078 1083 for f in drop:
1079 1084 del m1[f]
1080 1085 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1081 1086 p2.manifestnode(), (new, drop))
1082 1087
1083 1088 # update changelog
1084 1089 self.changelog.delayupdate()
1085 1090 n = self.changelog.add(mn, changed + removed, ctx.description(),
1086 1091 trp, p1.node(), p2.node(),
1087 1092 user, ctx.date(), ctx.extra().copy())
1088 1093 p = lambda: self.changelog.writepending() and self.root or ""
1089 1094 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1090 1095 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1091 1096 parent2=xp2, pending=p)
1092 1097 self.changelog.finalize(trp)
1093 1098 tr.close()
1094 1099
1095 1100 if self._branchcache:
1096 1101 self.updatebranchcache()
1097 1102 return n
1098 1103 finally:
1099 1104 if tr:
1100 1105 tr.release()
1101 1106 lock.release()
1102 1107
1103 1108 def destroyed(self):
1104 1109 '''Inform the repository that nodes have been destroyed.
1105 1110 Intended for use by strip and rollback, so there's a common
1106 1111 place for anything that has to be done after destroying history.'''
1107 1112 # XXX it might be nice if we could take the list of destroyed
1108 1113 # nodes, but I don't see an easy way for rollback() to do that
1109 1114
1110 1115 # Ensure the persistent tag cache is updated. Doing it now
1111 1116 # means that the tag cache only has to worry about destroyed
1112 1117 # heads immediately after a strip/rollback. That in turn
1113 1118 # guarantees that "cachetip == currenttip" (comparing both rev
1114 1119 # and node) always means no nodes have been added or destroyed.
1115 1120
1116 1121 # XXX this is suboptimal when qrefresh'ing: we strip the current
1117 1122 # head, refresh the tag cache, then immediately add a new head.
1118 1123 # But I think doing it this way is necessary for the "instant
1119 1124 # tag cache retrieval" case to work.
1120 1125 self.invalidatecaches()
1121 1126
1122 1127 def walk(self, match, node=None):
1123 1128 '''
1124 1129 walk recursively through the directory tree or a given
1125 1130 changeset, finding all files matched by the match
1126 1131 function
1127 1132 '''
1128 1133 return self[node].walk(match)
1129 1134
1130 1135 def status(self, node1='.', node2=None, match=None,
1131 1136 ignored=False, clean=False, unknown=False,
1132 1137 listsubrepos=False):
1133 1138 """return status of files between two nodes, or between a node and the working directory
1134 1139
1135 1140 If node1 is None, use the first dirstate parent instead.
1136 1141 If node2 is None, compare node1 with working directory.
1137 1142 """
1138 1143
1139 1144 def mfmatches(ctx):
1140 1145 mf = ctx.manifest().copy()
1141 1146 for fn in mf.keys():
1142 1147 if not match(fn):
1143 1148 del mf[fn]
1144 1149 return mf
1145 1150
1146 1151 if isinstance(node1, context.changectx):
1147 1152 ctx1 = node1
1148 1153 else:
1149 1154 ctx1 = self[node1]
1150 1155 if isinstance(node2, context.changectx):
1151 1156 ctx2 = node2
1152 1157 else:
1153 1158 ctx2 = self[node2]
1154 1159
1155 1160 working = ctx2.rev() is None
1156 1161 parentworking = working and ctx1 == self['.']
1157 1162 match = match or matchmod.always(self.root, self.getcwd())
1158 1163 listignored, listclean, listunknown = ignored, clean, unknown
1159 1164
1160 1165 # load earliest manifest first for caching reasons
1161 1166 if not working and ctx2.rev() < ctx1.rev():
1162 1167 ctx2.manifest()
1163 1168
1164 1169 if not parentworking:
1165 1170 def bad(f, msg):
1166 1171 if f not in ctx1:
1167 1172 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1168 1173 match.bad = bad
1169 1174
1170 1175 if working: # we need to scan the working dir
1171 1176 subrepos = []
1172 1177 if '.hgsub' in self.dirstate:
1173 1178 subrepos = ctx1.substate.keys()
1174 1179 s = self.dirstate.status(match, subrepos, listignored,
1175 1180 listclean, listunknown)
1176 1181 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1177 1182
1178 1183 # check for any possibly clean files
1179 1184 if parentworking and cmp:
1180 1185 fixup = []
1181 1186 # do a full compare of any files that might have changed
1182 1187 for f in sorted(cmp):
1183 1188 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1184 1189 or ctx1[f].cmp(ctx2[f])):
1185 1190 modified.append(f)
1186 1191 else:
1187 1192 fixup.append(f)
1188 1193
1189 1194 # update dirstate for files that are actually clean
1190 1195 if fixup:
1191 1196 if listclean:
1192 1197 clean += fixup
1193 1198
1194 1199 try:
1195 1200 # updating the dirstate is optional
1196 1201 # so we don't wait on the lock
1197 1202 wlock = self.wlock(False)
1198 1203 try:
1199 1204 for f in fixup:
1200 1205 self.dirstate.normal(f)
1201 1206 finally:
1202 1207 wlock.release()
1203 1208 except error.LockError:
1204 1209 pass
1205 1210
1206 1211 if not parentworking:
1207 1212 mf1 = mfmatches(ctx1)
1208 1213 if working:
1209 1214 # we are comparing working dir against non-parent
1210 1215 # generate a pseudo-manifest for the working dir
1211 1216 mf2 = mfmatches(self['.'])
1212 1217 for f in cmp + modified + added:
1213 1218 mf2[f] = None
1214 1219 mf2.set(f, ctx2.flags(f))
1215 1220 for f in removed:
1216 1221 if f in mf2:
1217 1222 del mf2[f]
1218 1223 else:
1219 1224 # we are comparing two revisions
1220 1225 deleted, unknown, ignored = [], [], []
1221 1226 mf2 = mfmatches(ctx2)
1222 1227
1223 1228 modified, added, clean = [], [], []
1224 1229 for fn in mf2:
1225 1230 if fn in mf1:
1226 1231 if (mf1.flags(fn) != mf2.flags(fn) or
1227 1232 (mf1[fn] != mf2[fn] and
1228 1233 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1229 1234 modified.append(fn)
1230 1235 elif listclean:
1231 1236 clean.append(fn)
1232 1237 del mf1[fn]
1233 1238 else:
1234 1239 added.append(fn)
1235 1240 removed = mf1.keys()
1236 1241
1237 1242 r = modified, added, removed, deleted, unknown, ignored, clean
1238 1243
1239 1244 if listsubrepos:
1240 1245 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1241 1246 if working:
1242 1247 rev2 = None
1243 1248 else:
1244 1249 rev2 = ctx2.substate[subpath][1]
1245 1250 try:
1246 1251 submatch = matchmod.narrowmatcher(subpath, match)
1247 1252 s = sub.status(rev2, match=submatch, ignored=listignored,
1248 1253 clean=listclean, unknown=listunknown,
1249 1254 listsubrepos=True)
1250 1255 for rfiles, sfiles in zip(r, s):
1251 1256 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1252 1257 except error.LookupError:
1253 1258 self.ui.status(_("skipping missing subrepository: %s\n")
1254 1259 % subpath)
1255 1260
1256 1261 for l in r:
1257 1262 l.sort()
1258 1263 return r
1259 1264
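status() always returns the same 7-tuple, and callers index it positionally (commit() uses changes[0] for modified and changes[6] for clean). A small helper, as a sketch, to name the fields:

    def named_status(status7):
        names = ('modified', 'added', 'removed', 'deleted',
                 'unknown', 'ignored', 'clean')
        return dict(zip(names, status7))

    # e.g. named_status(repo.status(ignored=True, clean=True, unknown=True))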
1260 1265 def heads(self, start=None):
1261 1266 heads = self.changelog.heads(start)
1262 1267 # sort the output in rev descending order
1263 1268 return sorted(heads, key=self.changelog.rev, reverse=True)
1264 1269
1265 1270 def branchheads(self, branch=None, start=None, closed=False):
1266 1271 '''return a (possibly filtered) list of heads for the given branch
1267 1272
1268 1273 Heads are returned in topological order, from newest to oldest.
1269 1274 If branch is None, use the dirstate branch.
1270 1275 If start is not None, return only heads reachable from start.
1271 1276 If closed is True, return heads that are marked as closed as well.
1272 1277 '''
1273 1278 if branch is None:
1274 1279 branch = self[None].branch()
1275 1280 branches = self.branchmap()
1276 1281 if branch not in branches:
1277 1282 return []
1278 1283 # the cache returns heads ordered lowest to highest
1279 1284 bheads = list(reversed(branches[branch]))
1280 1285 if start is not None:
1281 1286 # filter out the heads that cannot be reached from startrev
1282 1287 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1283 1288 bheads = [h for h in bheads if h in fbheads]
1284 1289 if not closed:
1285 1290 bheads = [h for h in bheads if
1286 1291 ('close' not in self.changelog.read(h)[5])]
1287 1292 return bheads
1288 1293
1289 1294 def branches(self, nodes):
1290 1295 if not nodes:
1291 1296 nodes = [self.changelog.tip()]
1292 1297 b = []
1293 1298 for n in nodes:
1294 1299 t = n
1295 1300 while 1:
1296 1301 p = self.changelog.parents(n)
1297 1302 if p[1] != nullid or p[0] == nullid:
1298 1303 b.append((t, n, p[0], p[1]))
1299 1304 break
1300 1305 n = p[0]
1301 1306 return b
1302 1307
1303 1308 def between(self, pairs):
1304 1309 r = []
1305 1310
1306 1311 for top, bottom in pairs:
1307 1312 n, l, i = top, [], 0
1308 1313 f = 1
1309 1314
1310 1315 while n != bottom and n != nullid:
1311 1316 p = self.changelog.parents(n)[0]
1312 1317 if i == f:
1313 1318 l.append(n)
1314 1319 f = f * 2
1315 1320 n = p
1316 1321 i += 1
1317 1322
1318 1323 r.append(l)
1319 1324
1320 1325 return r
1321 1326
1322 1327 def pull(self, remote, heads=None, force=False):
1323 1328 lock = self.lock()
1324 1329 try:
1325 1330 usecommon = remote.capable('getbundle')
1326 1331 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1327 1332 force=force, commononly=usecommon)
1328 1333 common, fetch, rheads = tmp
1329 1334 if not fetch:
1330 1335 self.ui.status(_("no changes found\n"))
1331 1336 result = 0
1332 1337 else:
1333 1338 if heads is None and list(common) == [nullid]:
1334 1339 self.ui.status(_("requesting all changes\n"))
1335 1340 elif heads is None and remote.capable('changegroupsubset'):
1336 1341 # issue1320, avoid a race if remote changed after discovery
1337 1342 heads = rheads
1338 1343
1339 1344 if usecommon:
1340 1345 cg = remote.getbundle('pull', common=common,
1341 1346 heads=heads or rheads)
1342 1347 elif heads is None:
1343 1348 cg = remote.changegroup(fetch, 'pull')
1344 1349 elif not remote.capable('changegroupsubset'):
1345 1350 raise util.Abort(_("partial pull cannot be done because "
1346 1351 "other repository doesn't support "
1347 1352 "changegroupsubset."))
1348 1353 else:
1349 1354 cg = remote.changegroupsubset(fetch, heads, 'pull')
1350 1355 result = self.addchangegroup(cg, 'pull', remote.url(),
1351 1356 lock=lock)
1352 1357 finally:
1353 1358 lock.release()
1354 1359
1355 1360 return result
1356 1361
1357 1362 def checkpush(self, force, revs):
1358 1363 """Extensions can override this function if additional checks have
1359 1364 to be performed before pushing, or call it if they override push
1360 1365 command.
1361 1366 """
1362 1367 pass
1363 1368
1364 1369 def push(self, remote, force=False, revs=None, newbranch=False):
1365 1370 '''Push outgoing changesets (limited by revs) from the current
1366 1371 repository to remote. Return an integer:
1367 1372 - 0 means HTTP error *or* nothing to push
1368 1373 - 1 means we pushed and remote head count is unchanged *or*
1369 1374 we have outgoing changesets but refused to push
1370 1375 - other values as described by addchangegroup()
1371 1376 '''
1372 1377 # there are two ways to push to remote repo:
1373 1378 #
1374 1379 # addchangegroup assumes local user can lock remote
1375 1380 # repo (local filesystem, old ssh servers).
1376 1381 #
1377 1382 # unbundle assumes local user cannot lock remote repo (new ssh
1378 1383 # servers, http servers).
1379 1384
1380 1385 self.checkpush(force, revs)
1381 1386 lock = None
1382 1387 unbundle = remote.capable('unbundle')
1383 1388 if not unbundle:
1384 1389 lock = remote.lock()
1385 1390 try:
1386 1391 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1387 1392 newbranch)
1388 1393 ret = remote_heads
1389 1394 if cg is not None:
1390 1395 if unbundle:
1391 1396 # local repo finds heads on server, finds out what
1392 1397 # revs it must push. once revs transferred, if server
1393 1398 # finds it has different heads (someone else won
1394 1399 # commit/push race), server aborts.
1395 1400 if force:
1396 1401 remote_heads = ['force']
1397 1402 # ssh: return remote's addchangegroup()
1398 1403 # http: return remote's addchangegroup() or 0 for error
1399 1404 ret = remote.unbundle(cg, remote_heads, 'push')
1400 1405 else:
1401 1406 # we return an integer indicating remote head count change
1402 1407 ret = remote.addchangegroup(cg, 'push', self.url(),
1403 1408 lock=lock)
1404 1409 finally:
1405 1410 if lock is not None:
1406 1411 lock.release()
1407 1412
1408 1413 self.ui.debug("checking for updated bookmarks\n")
1409 1414 rb = remote.listkeys('bookmarks')
1410 1415 for k in rb.keys():
1411 1416 if k in self._bookmarks:
1412 1417 nr, nl = rb[k], hex(self._bookmarks[k])
1413 1418 if nr in self:
1414 1419 cr = self[nr]
1415 1420 cl = self[nl]
1416 1421 if cl in cr.descendants():
1417 1422 r = remote.pushkey('bookmarks', k, nr, nl)
1418 1423 if r:
1419 1424 self.ui.status(_("updating bookmark %s\n") % k)
1420 1425 else:
1421 1426 self.ui.warn(_('updating bookmark %s'
1422 1427 ' failed!\n') % k)
1423 1428
1424 1429 return ret
1425 1430
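The two transports described in the comment at the top of push() reduce to a capability check. A self-contained mirror of that decision, with a hypothetical capability set:

    def push_strategy(capabilities, force):
        if 'unbundle' in capabilities:
            # the server re-checks heads and aborts on a push race;
            # 'force' bypasses that check
            remote_heads = ['force'] if force else '<heads from discovery>'
            return 'unbundle', remote_heads
        return 'lock-and-addchangegroup', None

    print(push_strategy({'unbundle'}, force=True))  # -> ('unbundle', ['force'])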
1426 1431 def changegroupinfo(self, nodes, source):
1427 1432 if self.ui.verbose or source == 'bundle':
1428 1433 self.ui.status(_("%d changesets found\n") % len(nodes))
1429 1434 if self.ui.debugflag:
1430 1435 self.ui.debug("list of changesets:\n")
1431 1436 for node in nodes:
1432 1437 self.ui.debug("%s\n" % hex(node))
1433 1438
1434 1439 def changegroupsubset(self, bases, heads, source):
1435 1440 """Compute a changegroup consisting of all the nodes that are
1436 1441 descendants of any of the bases and ancestors of any of the heads.
1437 1442 Return a chunkbuffer object whose read() method will return
1438 1443 successive changegroup chunks.
1439 1444
1440 1445 It is fairly complex as determining which filenodes and which
1441 1446 manifest nodes need to be included for the changeset to be complete
1442 1447 is non-trivial.
1443 1448
1444 1449 Another wrinkle is doing the reverse, figuring out which changeset in
1445 1450 the changegroup a particular filenode or manifestnode belongs to.
1446 1451 """
1447 1452 cl = self.changelog
1448 1453 if not bases:
1449 1454 bases = [nullid]
1450 1455 csets, bases, heads = cl.nodesbetween(bases, heads)
1451 1456 # We assume that all ancestors of bases are known
1452 1457 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1453 1458 return self._changegroupsubset(common, csets, heads, source)
1454 1459
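The "all ancestors of bases are known" assumption above means common is the full ancestor closure of the bases. A tiny standalone mirror over an explicit parent map (the graph is hypothetical):

    def ancestor_closure(parents, bases):
        seen, stack = set(), list(bases)
        while stack:
            for p in parents.get(stack.pop(), ()):
                if p not in seen:
                    seen.add(p)
                    stack.append(p)
        return seen

    print(ancestor_closure({'c': ['b'], 'b': ['a']}, ['c']))   # -> {'a', 'b'}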
1455 1460 def getbundle(self, source, heads=None, common=None):
1456 1461 """Like changegroupsubset, but returns the set difference between the
1457 1462 ancestors of heads and the ancestors of common.
1458 1463
1459 1464 If heads is None, use the local heads. If common is None, use [nullid].
1460 1465
1461 1466 The nodes in common might not all be known locally due to the way the
1462 1467 current discovery protocol works.
1463 1468 """
1464 1469 cl = self.changelog
1465 1470 if common:
1466 1471 nm = cl.nodemap
1467 1472 common = [n for n in common if n in nm]
1468 1473 else:
1469 1474 common = [nullid]
1470 1475 if not heads:
1471 1476 heads = cl.heads()
1472 1477 common, missing = cl.findcommonmissing(common, heads)
1473 1478 return self._changegroupsubset(common, missing, heads, source)
1474 1479
1475 1480 def _changegroupsubset(self, commonrevs, csets, heads, source):
1476 1481
1477 1482 cl = self.changelog
1478 1483 mf = self.manifest
1479 1484 mfs = {} # needed manifests
1480 1485 fnodes = {} # needed file nodes
1481 1486 changedfiles = set()
1482 1487 fstate = ['', {}]
1483 1488 count = [0]
1484 1489
1485 1490 # can we go through the fast path ?
1486 1491 heads.sort()
1487 1492 if heads == sorted(self.heads()):
1488 1493 return self._changegroup(csets, source)
1489 1494
1490 1495 # slow path
1491 1496 self.hook('preoutgoing', throw=True, source=source)
1492 1497 self.changegroupinfo(csets, source)
1493 1498
1494 1499 # filter any nodes that claim to be part of the known set
1495 1500 def prune(revlog, missing):
1496 1501 for n in missing:
1497 1502 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1498 1503 yield n
1499 1504
1500 1505 def lookup(revlog, x):
1501 1506 if revlog == cl:
1502 1507 c = cl.read(x)
1503 1508 changedfiles.update(c[3])
1504 1509 mfs.setdefault(c[0], x)
1505 1510 count[0] += 1
1506 1511 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1507 1512 return x
1508 1513 elif revlog == mf:
1509 1514 clnode = mfs[x]
1510 1515 mdata = mf.readfast(x)
1511 1516 for f in changedfiles:
1512 1517 if f in mdata:
1513 1518 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1514 1519 count[0] += 1
1515 1520 self.ui.progress(_('bundling'), count[0],
1516 1521 unit=_('manifests'), total=len(mfs))
1517 1522 return mfs[x]
1518 1523 else:
1519 1524 self.ui.progress(
1520 1525 _('bundling'), count[0], item=fstate[0],
1521 1526 unit=_('files'), total=len(changedfiles))
1522 1527 return fstate[1][x]
1523 1528
        bundler = changegroup.bundle10(lookup)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            for chunk in cl.group(csets, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifest nodes that calls back into
            # our lookup and data collection functions.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than changegroupsubset as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

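        # gennodelst yields the nodes of a revlog whose linkrev falls within
        # the outgoing revision set, i.e. exactly the manifest and file
        # revisions introduced by the changesets being sent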
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

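            # each file's delta group is introduced by a chunk carrying the
            # file name, and an empty chunk terminates the sequence (the
            # counterpart of fileheader()/close() in the bundling code above)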
            while True:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
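
    # Stream wire format, as consumed below: the first line is a status
    # code (0 = OK, 1 = operation forbidden, 2 = failed to lock the remote
    # repository); the next line is '<total_files> <total_bytes>'; then,
    # for each file, a '<name>\0<size>' line followed by exactly <size>
    # bytes of file data.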
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

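        # Negotiation, as implemented below: a bare 'stream' capability
        # implies the remote serves plain revlogv1; otherwise the
        # 'streamreqs' capability lists the remote's format requirements
        # (e.g. 'revlogv1,parentdelta'), and we stream only if every
        # listed format is one this client supports.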
        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

    def debugwireargs(self, one, two, three=None, four=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s" % (one, two, three, four)

# used to avoid circular references so destructors work
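# (sketch of typical use, not shown in this file: the transaction-creating
# code passes aftertrans([(journal, undoname), ...]) as the transaction's
# after-close callback, so the journal only becomes the undo file once the
# transaction has committed)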
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, urlmod.localpath(path), create)

def islocal(path):
    return True