tags: remove another check for valid nodes...
Idan Kamara
r13909:184cf2fa default
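
Review note: this changeset deletes the try/except around changelog.rev() in tagslist(). The defensive branch is dead code because _findtags() (further down in this file) already drops any tag whose node fails self.changelog.lookup(), and the synthetic 'tip' entry always points at a known node. A minimal before/after sketch of the changed lines, with the rest of the method elided:

    # before: defensively mapped tags to unknown nodes to rev -2
    try:
        r = self.changelog.rev(n)
    except error.LookupError:
        r = -2  # sort to the beginning of the list if unknown

    # after: every node in self.tags() is already known,
    # so changelog.rev() cannot raise LookupError here
    r = self.changelog.rev(n)
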
@@ -1,1937 +1,1934
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'parentdelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=0):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = util.path_auditor(self.root, self._checknested)
35 35 self.opener = util.opener(self.path)
36 36 self.wopener = util.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener("00changelog.i", "a").write(
61 61 '\0\0\0\2' # represents revlogv2
62 62 ' dummy changelog to prevent using the old repo layout'
63 63 )
64 64 if self.ui.configbool('format', 'parentdelta', False):
65 65 requirements.append("parentdelta")
66 66 else:
67 67 raise error.RepoError(_("repository %s not found") % path)
68 68 elif create:
69 69 raise error.RepoError(_("repository %s already exists") % path)
70 70 else:
71 71 # find requirements
72 72 requirements = set()
73 73 try:
74 74 requirements = set(self.opener("requires").read().splitlines())
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 for r in requirements - self.supported:
79 79 raise error.RequirementError(
80 80 _("requirement '%s' not supported") % r)
81 81
82 82 self.sharedpath = self.path
83 83 try:
84 84 s = os.path.realpath(self.opener("sharedpath").read())
85 85 if not os.path.exists(s):
86 86 raise error.RepoError(
87 87 _('.hg/sharedpath points to nonexistent directory %s') % s)
88 88 self.sharedpath = s
89 89 except IOError, inst:
90 90 if inst.errno != errno.ENOENT:
91 91 raise
92 92
93 93 self.store = store.store(requirements, self.sharedpath, util.opener)
94 94 self.spath = self.store.path
95 95 self.sopener = self.store.opener
96 96 self.sjoin = self.store.join
97 97 self.opener.createmode = self.store.createmode
98 98 self._applyrequirements(requirements)
99 99 if create:
100 100 self._writerequirements()
101 101
102 102 # These two define the set of tags for this repository. _tags
103 103 # maps tag name to node; _tagtypes maps tag name to 'global' or
104 104 # 'local'. (Global tags are defined by .hgtags across all
105 105 # heads, and local tags are defined in .hg/localtags.) They
106 106 # constitute the in-memory cache of tags.
107 107 self._tags = None
108 108 self._tagtypes = None
109 109
110 110 self._branchcache = None
111 111 self._branchcachetip = None
112 112 self.nodetagscache = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 def _applyrequirements(self, requirements):
118 118 self.requirements = requirements
119 119 self.sopener.options = {}
120 120 if 'parentdelta' in requirements:
121 121 self.sopener.options['parentdelta'] = 1
122 122
123 123 def _writerequirements(self):
124 124 reqfile = self.opener("requires", "w")
125 125 for r in self.requirements:
126 126 reqfile.write("%s\n" % r)
127 127 reqfile.close()
128 128
129 129 def _checknested(self, path):
130 130 """Determine if path is a legal nested repository."""
131 131 if not path.startswith(self.root):
132 132 return False
133 133 subpath = path[len(self.root) + 1:]
134 134
135 135 # XXX: Checking against the current working copy is wrong in
136 136 # the sense that it can reject things like
137 137 #
138 138 # $ hg cat -r 10 sub/x.txt
139 139 #
140 140 # if sub/ is no longer a subrepository in the working copy
141 141 # parent revision.
142 142 #
143 143 # However, it can of course also allow things that would have
144 144 # been rejected before, such as the above cat command if sub/
145 145 # is a subrepository now, but was a normal directory before.
146 146 # The old path auditor would have rejected by mistake since it
147 147 # panics when it sees sub/.hg/.
148 148 #
149 149 # All in all, checking against the working copy seems sensible
150 150 # since we want to prevent access to nested repositories on
151 151 # the filesystem *now*.
152 152 ctx = self[None]
153 153 parts = util.splitpath(subpath)
154 154 while parts:
155 155 prefix = os.sep.join(parts)
156 156 if prefix in ctx.substate:
157 157 if prefix == subpath:
158 158 return True
159 159 else:
160 160 sub = ctx.sub(prefix)
161 161 return sub.checknested(subpath[len(prefix) + 1:])
162 162 else:
163 163 parts.pop()
164 164 return False
165 165
166 166 @util.propertycache
167 167 def _bookmarks(self):
168 168 return bookmarks.read(self)
169 169
170 170 @util.propertycache
171 171 def _bookmarkcurrent(self):
172 172 return bookmarks.readcurrent(self)
173 173
174 174 @propertycache
175 175 def changelog(self):
176 176 c = changelog.changelog(self.sopener)
177 177 if 'HG_PENDING' in os.environ:
178 178 p = os.environ['HG_PENDING']
179 179 if p.startswith(self.root):
180 180 c.readpending('00changelog.i.a')
181 181 self.sopener.options['defversion'] = c.version
182 182 return c
183 183
184 184 @propertycache
185 185 def manifest(self):
186 186 return manifest.manifest(self.sopener)
187 187
188 188 @propertycache
189 189 def dirstate(self):
190 190 warned = [0]
191 191 def validate(node):
192 192 try:
193 193 r = self.changelog.rev(node)
194 194 return node
195 195 except error.LookupError:
196 196 if not warned[0]:
197 197 warned[0] = True
198 198 self.ui.warn(_("warning: ignoring unknown"
199 199 " working parent %s!\n") % short(node))
200 200 return nullid
201 201
202 202 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
203 203
204 204 def __getitem__(self, changeid):
205 205 if changeid is None:
206 206 return context.workingctx(self)
207 207 return context.changectx(self, changeid)
208 208
209 209 def __contains__(self, changeid):
210 210 try:
211 211 return bool(self.lookup(changeid))
212 212 except error.RepoLookupError:
213 213 return False
214 214
215 215 def __nonzero__(self):
216 216 return True
217 217
218 218 def __len__(self):
219 219 return len(self.changelog)
220 220
221 221 def __iter__(self):
222 222 for i in xrange(len(self)):
223 223 yield i
224 224
225 225 def url(self):
226 226 return 'file:' + self.root
227 227
228 228 def hook(self, name, throw=False, **args):
229 229 return hook.hook(self.ui, self, name, throw, **args)
230 230
231 231 tag_disallowed = ':\r\n'
232 232
233 233 def _tag(self, names, node, message, local, user, date, extra={}):
234 234 if isinstance(names, str):
235 235 allchars = names
236 236 names = (names,)
237 237 else:
238 238 allchars = ''.join(names)
239 239 for c in self.tag_disallowed:
240 240 if c in allchars:
241 241 raise util.Abort(_('%r cannot be used in a tag name') % c)
242 242
243 243 branches = self.branchmap()
244 244 for name in names:
245 245 self.hook('pretag', throw=True, node=hex(node), tag=name,
246 246 local=local)
247 247 if name in branches:
248 248 self.ui.warn(_("warning: tag %s conflicts with existing"
249 249 " branch name\n") % name)
250 250
251 251 def writetags(fp, names, munge, prevtags):
252 252 fp.seek(0, 2)
253 253 if prevtags and prevtags[-1] != '\n':
254 254 fp.write('\n')
255 255 for name in names:
256 256 m = munge and munge(name) or name
257 257 if self._tagtypes and name in self._tagtypes:
258 258 old = self._tags.get(name, nullid)
259 259 fp.write('%s %s\n' % (hex(old), m))
260 260 fp.write('%s %s\n' % (hex(node), m))
261 261 fp.close()
262 262
263 263 prevtags = ''
264 264 if local:
265 265 try:
266 266 fp = self.opener('localtags', 'r+')
267 267 except IOError:
268 268 fp = self.opener('localtags', 'a')
269 269 else:
270 270 prevtags = fp.read()
271 271
272 272 # local tags are stored in the current charset
273 273 writetags(fp, names, None, prevtags)
274 274 for name in names:
275 275 self.hook('tag', node=hex(node), tag=name, local=local)
276 276 return
277 277
278 278 try:
279 279 fp = self.wfile('.hgtags', 'rb+')
280 280 except IOError:
281 281 fp = self.wfile('.hgtags', 'ab')
282 282 else:
283 283 prevtags = fp.read()
284 284
285 285 # committed tags are stored in UTF-8
286 286 writetags(fp, names, encoding.fromlocal, prevtags)
287 287
288 288 fp.close()
289 289
290 290 if '.hgtags' not in self.dirstate:
291 291 self[None].add(['.hgtags'])
292 292
293 293 m = matchmod.exact(self.root, '', ['.hgtags'])
294 294 tagnode = self.commit(message, user, date, extra=extra, match=m)
295 295
296 296 for name in names:
297 297 self.hook('tag', node=hex(node), tag=name, local=local)
298 298
299 299 return tagnode
300 300
301 301 def tag(self, names, node, message, local, user, date):
302 302 '''tag a revision with one or more symbolic names.
303 303
304 304 names is a list of strings or, when adding a single tag, names may be a
305 305 string.
306 306
307 307 if local is True, the tags are stored in a per-repository file.
308 308 otherwise, they are stored in the .hgtags file, and a new
309 309 changeset is committed with the change.
310 310
311 311 keyword arguments:
312 312
313 313 local: whether to store tags in non-version-controlled file
314 314 (default False)
315 315
316 316 message: commit message to use if committing
317 317
318 318 user: name of user to use if committing
319 319
320 320 date: date tuple to use if committing'''
321 321
322 322 if not local:
323 323 for x in self.status()[:5]:
324 324 if '.hgtags' in x:
325 325 raise util.Abort(_('working copy of .hgtags is changed '
326 326 '(please commit .hgtags manually)'))
327 327
328 328 self.tags() # instantiate the cache
329 329 self._tag(names, node, message, local, user, date)
330 330
331 331 def tags(self):
332 332 '''return a mapping of tag to node'''
333 333 if self._tags is None:
334 334 (self._tags, self._tagtypes) = self._findtags()
335 335
336 336 return self._tags
337 337
338 338 def _findtags(self):
339 339 '''Do the hard work of finding tags. Return a pair of dicts
340 340 (tags, tagtypes) where tags maps tag name to node, and tagtypes
341 341 maps tag name to a string like \'global\' or \'local\'.
342 342 Subclasses or extensions are free to add their own tags, but
343 343 should be aware that the returned dicts will be retained for the
344 344 duration of the localrepo object.'''
345 345
346 346 # XXX what tagtype should subclasses/extensions use? Currently
347 347 # mq and bookmarks add tags, but do not set the tagtype at all.
348 348 # Should each extension invent its own tag type? Should there
349 349 # be one tagtype for all such "virtual" tags? Or is the status
350 350 # quo fine?
351 351
352 352 alltags = {} # map tag name to (node, hist)
353 353 tagtypes = {}
354 354
355 355 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
356 356 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
357 357
358 358 # Build the return dicts. Have to re-encode tag names because
359 359 # the tags module always uses UTF-8 (in order not to lose info
360 360 # writing to the cache), but the rest of Mercurial wants them in
361 361 # local encoding.
362 362 tags = {}
363 363 for (name, (node, hist)) in alltags.iteritems():
364 364 if node != nullid:
365 365 try:
366 366 # ignore tags to unknown nodes
367 367 self.changelog.lookup(node)
368 368 tags[encoding.tolocal(name)] = node
369 369 except error.LookupError:
370 370 pass
371 371 tags['tip'] = self.changelog.tip()
372 372 tagtypes = dict([(encoding.tolocal(name), value)
373 373 for (name, value) in tagtypes.iteritems()])
374 374 return (tags, tagtypes)
375 375
376 376 def tagtype(self, tagname):
377 377 '''
378 378 return the type of the given tag. result can be:
379 379
380 380 'local' : a local tag
381 381 'global' : a global tag
382 382 None : tag does not exist
383 383 '''
384 384
385 385 self.tags()
386 386
387 387 return self._tagtypes.get(tagname)
388 388
389 389 def tagslist(self):
390 390 '''return a list of tags ordered by revision'''
391 391 l = []
392 392 for t, n in self.tags().iteritems():
393 try:
394 r = self.changelog.rev(n)
395 except error.LookupError:
396 r = -2 # sort to the beginning of the list if unknown
393 r = self.changelog.rev(n)
397 394 l.append((r, t, n))
398 395 return [(t, n) for r, t, n in sorted(l)]
399 396
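
Review note on the deletion above: tagslist() may call changelog.rev() unguarded because self.tags() is populated exclusively by _findtags(), which skips tags to unknown nodes (the lookup() call a few screens up) and only adds 'tip' from the changelog itself. Assuming no extension writes unknown nodes straight into self._tags, the invariant is:

    # every node returned by tags() has already passed
    # self.changelog.lookup(node) inside _findtags()
    for t, n in repo.tags().iteritems():
        r = repo.changelog.rev(n)  # cannot raise LookupError
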
400 397 def nodetags(self, node):
401 398 '''return the tags associated with a node'''
402 399 if not self.nodetagscache:
403 400 self.nodetagscache = {}
404 401 for t, n in self.tags().iteritems():
405 402 self.nodetagscache.setdefault(n, []).append(t)
406 403 for tags in self.nodetagscache.itervalues():
407 404 tags.sort()
408 405 return self.nodetagscache.get(node, [])
409 406
410 407 def nodebookmarks(self, node):
411 408 marks = []
412 409 for bookmark, n in self._bookmarks.iteritems():
413 410 if n == node:
414 411 marks.append(bookmark)
415 412 return sorted(marks)
416 413
417 414 def _branchtags(self, partial, lrev):
418 415 # TODO: rename this function?
419 416 tiprev = len(self) - 1
420 417 if lrev != tiprev:
421 418 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
422 419 self._updatebranchcache(partial, ctxgen)
423 420 self._writebranchcache(partial, self.changelog.tip(), tiprev)
424 421
425 422 return partial
426 423
427 424 def updatebranchcache(self):
428 425 tip = self.changelog.tip()
429 426 if self._branchcache is not None and self._branchcachetip == tip:
430 427 return self._branchcache
431 428
432 429 oldtip = self._branchcachetip
433 430 self._branchcachetip = tip
434 431 if oldtip is None or oldtip not in self.changelog.nodemap:
435 432 partial, last, lrev = self._readbranchcache()
436 433 else:
437 434 lrev = self.changelog.rev(oldtip)
438 435 partial = self._branchcache
439 436
440 437 self._branchtags(partial, lrev)
441 438 # this private cache holds all heads (not just tips)
442 439 self._branchcache = partial
443 440
444 441 def branchmap(self):
445 442 '''returns a dictionary {branch: [branchheads]}'''
446 443 self.updatebranchcache()
447 444 return self._branchcache
448 445
449 446 def branchtags(self):
450 447 '''return a dict where branch names map to the tipmost head of
451 448 the branch, open heads come before closed'''
452 449 bt = {}
453 450 for bn, heads in self.branchmap().iteritems():
454 451 tip = heads[-1]
455 452 for h in reversed(heads):
456 453 if 'close' not in self.changelog.read(h)[5]:
457 454 tip = h
458 455 break
459 456 bt[bn] = tip
460 457 return bt
461 458
462 459 def _readbranchcache(self):
463 460 partial = {}
464 461 try:
465 462 f = self.opener("cache/branchheads")
466 463 lines = f.read().split('\n')
467 464 f.close()
468 465 except (IOError, OSError):
469 466 return {}, nullid, nullrev
470 467
471 468 try:
472 469 last, lrev = lines.pop(0).split(" ", 1)
473 470 last, lrev = bin(last), int(lrev)
474 471 if lrev >= len(self) or self[lrev].node() != last:
475 472 # invalidate the cache
476 473 raise ValueError('invalidating branch cache (tip differs)')
477 474 for l in lines:
478 475 if not l:
479 476 continue
480 477 node, label = l.split(" ", 1)
481 478 label = encoding.tolocal(label.strip())
482 479 partial.setdefault(label, []).append(bin(node))
483 480 except KeyboardInterrupt:
484 481 raise
485 482 except Exception, inst:
486 483 if self.ui.debugflag:
487 484 self.ui.warn(str(inst), '\n')
488 485 partial, last, lrev = {}, nullid, nullrev
489 486 return partial, last, lrev
490 487
491 488 def _writebranchcache(self, branches, tip, tiprev):
492 489 try:
493 490 f = self.opener("cache/branchheads", "w", atomictemp=True)
494 491 f.write("%s %s\n" % (hex(tip), tiprev))
495 492 for label, nodes in branches.iteritems():
496 493 for node in nodes:
497 494 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
498 495 f.rename()
499 496 except (IOError, OSError):
500 497 pass
501 498
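
_readbranchcache() and _writebranchcache() above agree on a small text format for .hg/cache/branchheads: a header line with the changelog tip (used as a validity check) followed by one line per branch head. A hypothetical cache for two branches, with placeholder hashes:

    # .hg/cache/branchheads (illustrative contents)
    #
    #   <40-hex tip node> 1260       <- hex(tip), tip rev
    #   <40-hex head node> default   <- hex(head), branch name
    #   <40-hex head node> stable
    #
    # if the header's node/rev pair no longer matches the changelog,
    # _readbranchcache() discards the file and rebuilds from scratch
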
502 499 def _updatebranchcache(self, partial, ctxgen):
503 500 # collect new branch entries
504 501 newbranches = {}
505 502 for c in ctxgen:
506 503 newbranches.setdefault(c.branch(), []).append(c.node())
507 504 # if older branchheads are reachable from new ones, they aren't
508 505 # really branchheads. Note checking parents is insufficient:
509 506 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
510 507 for branch, newnodes in newbranches.iteritems():
511 508 bheads = partial.setdefault(branch, [])
512 509 bheads.extend(newnodes)
513 510 if len(bheads) <= 1:
514 511 continue
515 512 # starting from tip means fewer passes over reachable
516 513 while newnodes:
517 514 latest = newnodes.pop()
518 515 if latest not in bheads:
519 516 continue
520 517 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
521 518 reachable = self.changelog.reachable(latest, minbhrev)
522 519 reachable.remove(latest)
523 520 bheads = [b for b in bheads if b not in reachable]
524 521 partial[branch] = bheads
525 522
526 523 def lookup(self, key):
527 524 if isinstance(key, int):
528 525 return self.changelog.node(key)
529 526 elif key == '.':
530 527 return self.dirstate.p1()
531 528 elif key == 'null':
532 529 return nullid
533 530 elif key == 'tip':
534 531 return self.changelog.tip()
535 532 n = self.changelog._match(key)
536 533 if n:
537 534 return n
538 535 if key in self._bookmarks:
539 536 return self._bookmarks[key]
540 537 if key in self.tags():
541 538 return self.tags()[key]
542 539 if key in self.branchtags():
543 540 return self.branchtags()[key]
544 541 n = self.changelog._partialmatch(key)
545 542 if n:
546 543 return n
547 544
548 545 # can't find key, check if it might have come from damaged dirstate
549 546 if key in self.dirstate.parents():
550 547 raise error.Abort(_("working directory has unknown parent '%s'!")
551 548 % short(key))
552 549 try:
553 550 if len(key) == 20:
554 551 key = hex(key)
555 552 except:
556 553 pass
557 554 raise error.RepoLookupError(_("unknown revision '%s'") % key)
558 555
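
lookup() above resolves names with a fixed precedence: integer revision, the special keys '.', 'null' and 'tip', an exact changelog match, then bookmarks, tags, branch names, and finally a unique hex prefix. A bookmark therefore shadows a same-named tag, which in turn shadows a same-named branch. A sketch (repository and names hypothetical):

    repo.lookup(0)          # integer -> changelog.node(0)
    repo.lookup('.')        # first dirstate parent
    repo.lookup('tip')      # changelog tip
    repo.lookup('feature')  # bookmark 'feature' wins over a tag
                            # or branch of the same name
    repo.lookup('1a2b3c')   # last resort: unique node hex prefix
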
559 556 def lookupbranch(self, key, remote=None):
560 557 repo = remote or self
561 558 if key in repo.branchmap():
562 559 return key
563 560
564 561 repo = (remote and remote.local()) and remote or self
565 562 return repo[key].branch()
566 563
567 564 def known(self, nodes):
568 565 nm = self.changelog.nodemap
569 566 return [(n in nm) for n in nodes]
570 567
571 568 def local(self):
572 569 return True
573 570
574 571 def join(self, f):
575 572 return os.path.join(self.path, f)
576 573
577 574 def wjoin(self, f):
578 575 return os.path.join(self.root, f)
579 576
580 577 def file(self, f):
581 578 if f[0] == '/':
582 579 f = f[1:]
583 580 return filelog.filelog(self.sopener, f)
584 581
585 582 def changectx(self, changeid):
586 583 return self[changeid]
587 584
588 585 def parents(self, changeid=None):
589 586 '''get list of changectxs for parents of changeid'''
590 587 return self[changeid].parents()
591 588
592 589 def filectx(self, path, changeid=None, fileid=None):
593 590 """changeid can be a changeset revision, node, or tag.
594 591 fileid can be a file revision or node."""
595 592 return context.filectx(self, path, changeid, fileid)
596 593
597 594 def getcwd(self):
598 595 return self.dirstate.getcwd()
599 596
600 597 def pathto(self, f, cwd=None):
601 598 return self.dirstate.pathto(f, cwd)
602 599
603 600 def wfile(self, f, mode='r'):
604 601 return self.wopener(f, mode)
605 602
606 603 def _link(self, f):
607 604 return os.path.islink(self.wjoin(f))
608 605
609 606 def _loadfilter(self, filter):
610 607 if filter not in self.filterpats:
611 608 l = []
612 609 for pat, cmd in self.ui.configitems(filter):
613 610 if cmd == '!':
614 611 continue
615 612 mf = matchmod.match(self.root, '', [pat])
616 613 fn = None
617 614 params = cmd
618 615 for name, filterfn in self._datafilters.iteritems():
619 616 if cmd.startswith(name):
620 617 fn = filterfn
621 618 params = cmd[len(name):].lstrip()
622 619 break
623 620 if not fn:
624 621 fn = lambda s, c, **kwargs: util.filter(s, c)
625 622 # Wrap old filters not supporting keyword arguments
626 623 if not inspect.getargspec(fn)[2]:
627 624 oldfn = fn
628 625 fn = lambda s, c, **kwargs: oldfn(s, c)
629 626 l.append((mf, fn, params))
630 627 self.filterpats[filter] = l
631 628 return self.filterpats[filter]
632 629
633 630 def _filter(self, filterpats, filename, data):
634 631 for mf, fn, cmd in filterpats:
635 632 if mf(filename):
636 633 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
637 634 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
638 635 break
639 636
640 637 return data
641 638
642 639 @propertycache
643 640 def _encodefilterpats(self):
644 641 return self._loadfilter('encode')
645 642
646 643 @propertycache
647 644 def _decodefilterpats(self):
648 645 return self._loadfilter('decode')
649 646
650 647 def adddatafilter(self, name, filter):
651 648 self._datafilters[name] = filter
652 649
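
adddatafilter() above registers a named Python filter that _loadfilter() will prefer over spawning a shell command whenever an [encode]/[decode] pattern's value starts with the filter's name. A minimal sketch of a hypothetical filter (the name 'upper:' and the hgrc snippet are illustrative):

    def upper(s, params, **kwargs):
        # s is the file data; params is whatever followed the
        # filter name in the hgrc value; ui/repo/filename arrive
        # as keyword arguments from _filter()
        return s.upper()

    repo.adddatafilter('upper:', upper)

    # matching hgrc configuration:
    #
    #   [encode]
    #   **.txt = upper:
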
653 650 def wread(self, filename):
654 651 if self._link(filename):
655 652 data = os.readlink(self.wjoin(filename))
656 653 else:
657 654 data = self.wopener(filename, 'r').read()
658 655 return self._filter(self._encodefilterpats, filename, data)
659 656
660 657 def wwrite(self, filename, data, flags):
661 658 data = self._filter(self._decodefilterpats, filename, data)
662 659 if 'l' in flags:
663 660 self.wopener.symlink(data, filename)
664 661 else:
665 662 self.wopener(filename, 'w').write(data)
666 663 if 'x' in flags:
667 664 util.set_flags(self.wjoin(filename), False, True)
668 665
669 666 def wwritedata(self, filename, data):
670 667 return self._filter(self._decodefilterpats, filename, data)
671 668
672 669 def transaction(self, desc):
673 670 tr = self._transref and self._transref() or None
674 671 if tr and tr.running():
675 672 return tr.nest()
676 673
677 674 # abort here if the journal already exists
678 675 if os.path.exists(self.sjoin("journal")):
679 676 raise error.RepoError(
680 677 _("abandoned transaction found - run hg recover"))
681 678
682 679 # save dirstate for rollback
683 680 try:
684 681 ds = self.opener("dirstate").read()
685 682 except IOError:
686 683 ds = ""
687 684 self.opener("journal.dirstate", "w").write(ds)
688 685 self.opener("journal.branch", "w").write(
689 686 encoding.fromlocal(self.dirstate.branch()))
690 687 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
691 688
692 689 renames = [(self.sjoin("journal"), self.sjoin("undo")),
693 690 (self.join("journal.dirstate"), self.join("undo.dirstate")),
694 691 (self.join("journal.branch"), self.join("undo.branch")),
695 692 (self.join("journal.desc"), self.join("undo.desc"))]
696 693 tr = transaction.transaction(self.ui.warn, self.sopener,
697 694 self.sjoin("journal"),
698 695 aftertrans(renames),
699 696 self.store.createmode)
700 697 self._transref = weakref.ref(tr)
701 698 return tr
702 699
703 700 def recover(self):
704 701 lock = self.lock()
705 702 try:
706 703 if os.path.exists(self.sjoin("journal")):
707 704 self.ui.status(_("rolling back interrupted transaction\n"))
708 705 transaction.rollback(self.sopener, self.sjoin("journal"),
709 706 self.ui.warn)
710 707 self.invalidate()
711 708 return True
712 709 else:
713 710 self.ui.warn(_("no interrupted transaction available\n"))
714 711 return False
715 712 finally:
716 713 lock.release()
717 714
718 715 def rollback(self, dryrun=False):
719 716 wlock = lock = None
720 717 try:
721 718 wlock = self.wlock()
722 719 lock = self.lock()
723 720 if os.path.exists(self.sjoin("undo")):
724 721 try:
725 722 args = self.opener("undo.desc", "r").read().splitlines()
726 723 if len(args) >= 3 and self.ui.verbose:
727 724 desc = _("repository tip rolled back to revision %s"
728 725 " (undo %s: %s)\n") % (
729 726 int(args[0]) - 1, args[1], args[2])
730 727 elif len(args) >= 2:
731 728 desc = _("repository tip rolled back to revision %s"
732 729 " (undo %s)\n") % (
733 730 int(args[0]) - 1, args[1])
734 731 except IOError:
735 732 desc = _("rolling back unknown transaction\n")
736 733 self.ui.status(desc)
737 734 if dryrun:
738 735 return
739 736 transaction.rollback(self.sopener, self.sjoin("undo"),
740 737 self.ui.warn)
741 738 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
742 739 if os.path.exists(self.join('undo.bookmarks')):
743 740 util.rename(self.join('undo.bookmarks'),
744 741 self.join('bookmarks'))
745 742 try:
746 743 branch = self.opener("undo.branch").read()
747 744 self.dirstate.setbranch(branch)
748 745 except IOError:
749 746 self.ui.warn(_("Named branch could not be reset, "
750 747 "current branch still is: %s\n")
751 748 % self.dirstate.branch())
752 749 self.invalidate()
753 750 self.dirstate.invalidate()
754 751 self.destroyed()
755 752 parents = tuple([p.rev() for p in self.parents()])
756 753 if len(parents) > 1:
757 754 self.ui.status(_("working directory now based on "
758 755 "revisions %d and %d\n") % parents)
759 756 else:
760 757 self.ui.status(_("working directory now based on "
761 758 "revision %d\n") % parents)
762 759 else:
763 760 self.ui.warn(_("no rollback information available\n"))
764 761 return 1
765 762 finally:
766 763 release(lock, wlock)
767 764
768 765 def invalidatecaches(self):
769 766 self._tags = None
770 767 self._tagtypes = None
771 768 self.nodetagscache = None
772 769 self._branchcache = None # in UTF-8
773 770 self._branchcachetip = None
774 771
775 772 def invalidate(self):
776 773 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
777 774 if a in self.__dict__:
778 775 delattr(self, a)
779 776 self.invalidatecaches()
780 777
781 778 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
782 779 try:
783 780 l = lock.lock(lockname, 0, releasefn, desc=desc)
784 781 except error.LockHeld, inst:
785 782 if not wait:
786 783 raise
787 784 self.ui.warn(_("waiting for lock on %s held by %r\n") %
788 785 (desc, inst.locker))
789 786 # default to 600 seconds timeout
790 787 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
791 788 releasefn, desc=desc)
792 789 if acquirefn:
793 790 acquirefn()
794 791 return l
795 792
796 793 def lock(self, wait=True):
797 794 '''Lock the repository store (.hg/store) and return a weak reference
798 795 to the lock. Use this before modifying the store (e.g. committing or
799 796 stripping). If you are opening a transaction, get a lock as well.'''
800 797 l = self._lockref and self._lockref()
801 798 if l is not None and l.held:
802 799 l.lock()
803 800 return l
804 801
805 802 l = self._lock(self.sjoin("lock"), wait, self.store.write,
806 803 self.invalidate, _('repository %s') % self.origroot)
807 804 self._lockref = weakref.ref(l)
808 805 return l
809 806
810 807 def wlock(self, wait=True):
811 808 '''Lock the non-store parts of the repository (everything under
812 809 .hg except .hg/store) and return a weak reference to the lock.
813 810 Use this before modifying files in .hg.'''
814 811 l = self._wlockref and self._wlockref()
815 812 if l is not None and l.held:
816 813 l.lock()
817 814 return l
818 815
819 816 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
820 817 self.dirstate.invalidate, _('working directory of %s') %
821 818 self.origroot)
822 819 self._wlockref = weakref.ref(l)
823 820 return l
824 821
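
lock() and wlock() above return cached, reentrant locks held only by weak reference. Callers needing both conventionally take the working-directory lock first and the store lock second (as rollback() earlier in this file does), so concurrent commands cannot deadlock on opposite acquisition orders. The usual pattern, as a sketch:

    wlock = lock = None
    try:
        wlock = repo.wlock()  # everything under .hg except the store
        lock = repo.lock()    # .hg/store
        # ... mutate the repository ...
    finally:
        release(lock, wlock)  # release() skips None; reverse order
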
825 822 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
826 823 """
827 824 commit an individual file as part of a larger transaction
828 825 """
829 826
830 827 fname = fctx.path()
831 828 text = fctx.data()
832 829 flog = self.file(fname)
833 830 fparent1 = manifest1.get(fname, nullid)
834 831 fparent2 = fparent2o = manifest2.get(fname, nullid)
835 832
836 833 meta = {}
837 834 copy = fctx.renamed()
838 835 if copy and copy[0] != fname:
839 836 # Mark the new revision of this file as a copy of another
840 837 # file. This copy data will effectively act as a parent
841 838 # of this new revision. If this is a merge, the first
842 839 # parent will be the nullid (meaning "look up the copy data")
843 840 # and the second one will be the other parent. For example:
844 841 #
845 842 # 0 --- 1 --- 3 rev1 changes file foo
846 843 # \ / rev2 renames foo to bar and changes it
847 844 # \- 2 -/ rev3 should have bar with all changes and
848 845 # should record that bar descends from
849 846 # bar in rev2 and foo in rev1
850 847 #
851 848 # this allows this merge to succeed:
852 849 #
853 850 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
854 851 # \ / merging rev3 and rev4 should use bar@rev2
855 852 # \- 2 --- 4 as the merge base
856 853 #
857 854
858 855 cfname = copy[0]
859 856 crev = manifest1.get(cfname)
860 857 newfparent = fparent2
861 858
862 859 if manifest2: # branch merge
863 860 if fparent2 == nullid or crev is None: # copied on remote side
864 861 if cfname in manifest2:
865 862 crev = manifest2[cfname]
866 863 newfparent = fparent1
867 864
868 865 # find source in nearest ancestor if we've lost track
869 866 if not crev:
870 867 self.ui.debug(" %s: searching for copy revision for %s\n" %
871 868 (fname, cfname))
872 869 for ancestor in self[None].ancestors():
873 870 if cfname in ancestor:
874 871 crev = ancestor[cfname].filenode()
875 872 break
876 873
877 874 if crev:
878 875 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
879 876 meta["copy"] = cfname
880 877 meta["copyrev"] = hex(crev)
881 878 fparent1, fparent2 = nullid, newfparent
882 879 else:
883 880 self.ui.warn(_("warning: can't find ancestor for '%s' "
884 881 "copied from '%s'!\n") % (fname, cfname))
885 882
886 883 elif fparent2 != nullid:
887 884 # is one parent an ancestor of the other?
888 885 fparentancestor = flog.ancestor(fparent1, fparent2)
889 886 if fparentancestor == fparent1:
890 887 fparent1, fparent2 = fparent2, nullid
891 888 elif fparentancestor == fparent2:
892 889 fparent2 = nullid
893 890
894 891 # is the file changed?
895 892 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
896 893 changelist.append(fname)
897 894 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
898 895
899 896 # are just the flags changed during merge?
900 897 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
901 898 changelist.append(fname)
902 899
903 900 return fparent1
904 901
905 902 def commit(self, text="", user=None, date=None, match=None, force=False,
906 903 editor=False, extra={}):
907 904 """Add a new revision to current repository.
908 905
909 906 Revision information is gathered from the working directory,
910 907 match can be used to filter the committed files. If editor is
911 908 supplied, it is called to get a commit message.
912 909 """
913 910
914 911 def fail(f, msg):
915 912 raise util.Abort('%s: %s' % (f, msg))
916 913
917 914 if not match:
918 915 match = matchmod.always(self.root, '')
919 916
920 917 if not force:
921 918 vdirs = []
922 919 match.dir = vdirs.append
923 920 match.bad = fail
924 921
925 922 wlock = self.wlock()
926 923 try:
927 924 wctx = self[None]
928 925 merge = len(wctx.parents()) > 1
929 926
930 927 if (not force and merge and match and
931 928 (match.files() or match.anypats())):
932 929 raise util.Abort(_('cannot partially commit a merge '
933 930 '(do not specify files or patterns)'))
934 931
935 932 changes = self.status(match=match, clean=force)
936 933 if force:
937 934 changes[0].extend(changes[6]) # mq may commit unchanged files
938 935
939 936 # check subrepos
940 937 subs = []
941 938 removedsubs = set()
942 939 for p in wctx.parents():
943 940 removedsubs.update(s for s in p.substate if match(s))
944 941 for s in wctx.substate:
945 942 removedsubs.discard(s)
946 943 if match(s) and wctx.sub(s).dirty():
947 944 subs.append(s)
948 945 if (subs or removedsubs):
949 946 if (not match('.hgsub') and
950 947 '.hgsub' in (wctx.modified() + wctx.added())):
951 948 raise util.Abort(_("can't commit subrepos without .hgsub"))
952 949 if '.hgsubstate' not in changes[0]:
953 950 changes[0].insert(0, '.hgsubstate')
954 951
955 952 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
956 953 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
957 954 if changedsubs:
958 955 raise util.Abort(_("uncommitted changes in subrepo %s")
959 956 % changedsubs[0])
960 957
961 958 # make sure all explicit patterns are matched
962 959 if not force and match.files():
963 960 matched = set(changes[0] + changes[1] + changes[2])
964 961
965 962 for f in match.files():
966 963 if f == '.' or f in matched or f in wctx.substate:
967 964 continue
968 965 if f in changes[3]: # missing
969 966 fail(f, _('file not found!'))
970 967 if f in vdirs: # visited directory
971 968 d = f + '/'
972 969 for mf in matched:
973 970 if mf.startswith(d):
974 971 break
975 972 else:
976 973 fail(f, _("no match under directory!"))
977 974 elif f not in self.dirstate:
978 975 fail(f, _("file not tracked!"))
979 976
980 977 if (not force and not extra.get("close") and not merge
981 978 and not (changes[0] or changes[1] or changes[2])
982 979 and wctx.branch() == wctx.p1().branch()):
983 980 return None
984 981
985 982 ms = mergemod.mergestate(self)
986 983 for f in changes[0]:
987 984 if f in ms and ms[f] == 'u':
988 985 raise util.Abort(_("unresolved merge conflicts "
989 986 "(see hg help resolve)"))
990 987
991 988 cctx = context.workingctx(self, text, user, date, extra, changes)
992 989 if editor:
993 990 cctx._text = editor(self, cctx, subs)
994 991 edited = (text != cctx._text)
995 992
996 993 # commit subs
997 994 if subs or removedsubs:
998 995 state = wctx.substate.copy()
999 996 for s in sorted(subs):
1000 997 sub = wctx.sub(s)
1001 998 self.ui.status(_('committing subrepository %s\n') %
1002 999 subrepo.subrelpath(sub))
1003 1000 sr = sub.commit(cctx._text, user, date)
1004 1001 state[s] = (state[s][0], sr)
1005 1002 subrepo.writestate(self, state)
1006 1003
1007 1004 # Save commit message in case this transaction gets rolled back
1008 1005 # (e.g. by a pretxncommit hook). Leave the content alone on
1009 1006 # the assumption that the user will use the same editor again.
1010 1007 msgfile = self.opener('last-message.txt', 'wb')
1011 1008 msgfile.write(cctx._text)
1012 1009 msgfile.close()
1013 1010
1014 1011 p1, p2 = self.dirstate.parents()
1015 1012 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1016 1013 try:
1017 1014 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1018 1015 ret = self.commitctx(cctx, True)
1019 1016 except:
1020 1017 if edited:
1021 1018 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1022 1019 self.ui.write(
1023 1020 _('note: commit message saved in %s\n') % msgfn)
1024 1021 raise
1025 1022
1026 1023 # update bookmarks, dirstate and mergestate
1027 1024 bookmarks.update(self, p1, ret)
1028 1025 for f in changes[0] + changes[1]:
1029 1026 self.dirstate.normal(f)
1030 1027 for f in changes[2]:
1031 1028 self.dirstate.forget(f)
1032 1029 self.dirstate.setparents(ret)
1033 1030 ms.reset()
1034 1031 finally:
1035 1032 wlock.release()
1036 1033
1037 1034 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1038 1035 return ret
1039 1036
1040 1037 def commitctx(self, ctx, error=False):
1041 1038 """Add a new revision to current repository.
1042 1039 Revision information is passed via the context argument.
1043 1040 """
1044 1041
1045 1042 tr = lock = None
1046 1043 removed = list(ctx.removed())
1047 1044 p1, p2 = ctx.p1(), ctx.p2()
1048 1045 m1 = p1.manifest().copy()
1049 1046 m2 = p2.manifest()
1050 1047 user = ctx.user()
1051 1048
1052 1049 lock = self.lock()
1053 1050 try:
1054 1051 tr = self.transaction("commit")
1055 1052 trp = weakref.proxy(tr)
1056 1053
1057 1054 # check in files
1058 1055 new = {}
1059 1056 changed = []
1060 1057 linkrev = len(self)
1061 1058 for f in sorted(ctx.modified() + ctx.added()):
1062 1059 self.ui.note(f + "\n")
1063 1060 try:
1064 1061 fctx = ctx[f]
1065 1062 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1066 1063 changed)
1067 1064 m1.set(f, fctx.flags())
1068 1065 except OSError, inst:
1069 1066 self.ui.warn(_("trouble committing %s!\n") % f)
1070 1067 raise
1071 1068 except IOError, inst:
1072 1069 errcode = getattr(inst, 'errno', errno.ENOENT)
1073 1070 if error or errcode and errcode != errno.ENOENT:
1074 1071 self.ui.warn(_("trouble committing %s!\n") % f)
1075 1072 raise
1076 1073 else:
1077 1074 removed.append(f)
1078 1075
1079 1076 # update manifest
1080 1077 m1.update(new)
1081 1078 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1082 1079 drop = [f for f in removed if f in m1]
1083 1080 for f in drop:
1084 1081 del m1[f]
1085 1082 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1086 1083 p2.manifestnode(), (new, drop))
1087 1084
1088 1085 # update changelog
1089 1086 self.changelog.delayupdate()
1090 1087 n = self.changelog.add(mn, changed + removed, ctx.description(),
1091 1088 trp, p1.node(), p2.node(),
1092 1089 user, ctx.date(), ctx.extra().copy())
1093 1090 p = lambda: self.changelog.writepending() and self.root or ""
1094 1091 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1095 1092 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1096 1093 parent2=xp2, pending=p)
1097 1094 self.changelog.finalize(trp)
1098 1095 tr.close()
1099 1096
1100 1097 if self._branchcache:
1101 1098 self.updatebranchcache()
1102 1099 return n
1103 1100 finally:
1104 1101 if tr:
1105 1102 tr.release()
1106 1103 lock.release()
1107 1104
1108 1105 def destroyed(self):
1109 1106 '''Inform the repository that nodes have been destroyed.
1110 1107 Intended for use by strip and rollback, so there's a common
1111 1108 place for anything that has to be done after destroying history.'''
1112 1109 # XXX it might be nice if we could take the list of destroyed
1113 1110 # nodes, but I don't see an easy way for rollback() to do that
1114 1111
1115 1112 # Ensure the persistent tag cache is updated. Doing it now
1116 1113 # means that the tag cache only has to worry about destroyed
1117 1114 # heads immediately after a strip/rollback. That in turn
1118 1115 # guarantees that "cachetip == currenttip" (comparing both rev
1119 1116 # and node) always means no nodes have been added or destroyed.
1120 1117
1121 1118 # XXX this is suboptimal when qrefresh'ing: we strip the current
1122 1119 # head, refresh the tag cache, then immediately add a new head.
1123 1120 # But I think doing it this way is necessary for the "instant
1124 1121 # tag cache retrieval" case to work.
1125 1122 self.invalidatecaches()
1126 1123
1127 1124 def walk(self, match, node=None):
1128 1125 '''
1129 1126 walk recursively through the directory tree or a given
1130 1127 changeset, finding all files matched by the match
1131 1128 function
1132 1129 '''
1133 1130 return self[node].walk(match)
1134 1131
1135 1132 def status(self, node1='.', node2=None, match=None,
1136 1133 ignored=False, clean=False, unknown=False,
1137 1134 listsubrepos=False):
1138 1135 """return status of files between two nodes or node and working directory
1139 1136
1140 1137 If node1 is None, use the first dirstate parent instead.
1141 1138 If node2 is None, compare node1 with working directory.
1142 1139 """
1143 1140
1144 1141 def mfmatches(ctx):
1145 1142 mf = ctx.manifest().copy()
1146 1143 for fn in mf.keys():
1147 1144 if not match(fn):
1148 1145 del mf[fn]
1149 1146 return mf
1150 1147
1151 1148 if isinstance(node1, context.changectx):
1152 1149 ctx1 = node1
1153 1150 else:
1154 1151 ctx1 = self[node1]
1155 1152 if isinstance(node2, context.changectx):
1156 1153 ctx2 = node2
1157 1154 else:
1158 1155 ctx2 = self[node2]
1159 1156
1160 1157 working = ctx2.rev() is None
1161 1158 parentworking = working and ctx1 == self['.']
1162 1159 match = match or matchmod.always(self.root, self.getcwd())
1163 1160 listignored, listclean, listunknown = ignored, clean, unknown
1164 1161
1165 1162 # load earliest manifest first for caching reasons
1166 1163 if not working and ctx2.rev() < ctx1.rev():
1167 1164 ctx2.manifest()
1168 1165
1169 1166 if not parentworking:
1170 1167 def bad(f, msg):
1171 1168 if f not in ctx1:
1172 1169 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1173 1170 match.bad = bad
1174 1171
1175 1172 if working: # we need to scan the working dir
1176 1173 subrepos = []
1177 1174 if '.hgsub' in self.dirstate:
1178 1175 subrepos = ctx1.substate.keys()
1179 1176 s = self.dirstate.status(match, subrepos, listignored,
1180 1177 listclean, listunknown)
1181 1178 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1182 1179
1183 1180 # check for any possibly clean files
1184 1181 if parentworking and cmp:
1185 1182 fixup = []
1186 1183 # do a full compare of any files that might have changed
1187 1184 for f in sorted(cmp):
1188 1185 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1189 1186 or ctx1[f].cmp(ctx2[f])):
1190 1187 modified.append(f)
1191 1188 else:
1192 1189 fixup.append(f)
1193 1190
1194 1191 # update dirstate for files that are actually clean
1195 1192 if fixup:
1196 1193 if listclean:
1197 1194 clean += fixup
1198 1195
1199 1196 try:
1200 1197 # updating the dirstate is optional
1201 1198 # so we don't wait on the lock
1202 1199 wlock = self.wlock(False)
1203 1200 try:
1204 1201 for f in fixup:
1205 1202 self.dirstate.normal(f)
1206 1203 finally:
1207 1204 wlock.release()
1208 1205 except error.LockError:
1209 1206 pass
1210 1207
1211 1208 if not parentworking:
1212 1209 mf1 = mfmatches(ctx1)
1213 1210 if working:
1214 1211 # we are comparing working dir against non-parent
1215 1212 # generate a pseudo-manifest for the working dir
1216 1213 mf2 = mfmatches(self['.'])
1217 1214 for f in cmp + modified + added:
1218 1215 mf2[f] = None
1219 1216 mf2.set(f, ctx2.flags(f))
1220 1217 for f in removed:
1221 1218 if f in mf2:
1222 1219 del mf2[f]
1223 1220 else:
1224 1221 # we are comparing two revisions
1225 1222 deleted, unknown, ignored = [], [], []
1226 1223 mf2 = mfmatches(ctx2)
1227 1224
1228 1225 modified, added, clean = [], [], []
1229 1226 for fn in mf2:
1230 1227 if fn in mf1:
1231 1228 if (mf1.flags(fn) != mf2.flags(fn) or
1232 1229 (mf1[fn] != mf2[fn] and
1233 1230 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1234 1231 modified.append(fn)
1235 1232 elif listclean:
1236 1233 clean.append(fn)
1237 1234 del mf1[fn]
1238 1235 else:
1239 1236 added.append(fn)
1240 1237 removed = mf1.keys()
1241 1238
1242 1239 r = modified, added, removed, deleted, unknown, ignored, clean
1243 1240
1244 1241 if listsubrepos:
1245 1242 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1246 1243 if working:
1247 1244 rev2 = None
1248 1245 else:
1249 1246 rev2 = ctx2.substate[subpath][1]
1250 1247 try:
1251 1248 submatch = matchmod.narrowmatcher(subpath, match)
1252 1249 s = sub.status(rev2, match=submatch, ignored=listignored,
1253 1250 clean=listclean, unknown=listunknown,
1254 1251 listsubrepos=True)
1255 1252 for rfiles, sfiles in zip(r, s):
1256 1253 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1257 1254 except error.LookupError:
1258 1255 self.ui.status(_("skipping missing subrepository: %s\n")
1259 1256 % subpath)
1260 1257
1261 1258 for l in r:
1262 1259 l.sort()
1263 1260 return r
1264 1261
1265 1262 def heads(self, start=None):
1266 1263 heads = self.changelog.heads(start)
1267 1264 # sort the output in rev descending order
1268 1265 return sorted(heads, key=self.changelog.rev, reverse=True)
1269 1266
1270 1267 def branchheads(self, branch=None, start=None, closed=False):
1271 1268 '''return a (possibly filtered) list of heads for the given branch
1272 1269
1273 1270 Heads are returned in topological order, from newest to oldest.
1274 1271 If branch is None, use the dirstate branch.
1275 1272 If start is not None, return only heads reachable from start.
1276 1273 If closed is True, return heads that are marked as closed as well.
1277 1274 '''
1278 1275 if branch is None:
1279 1276 branch = self[None].branch()
1280 1277 branches = self.branchmap()
1281 1278 if branch not in branches:
1282 1279 return []
1283 1280 # the cache returns heads ordered lowest to highest
1284 1281 bheads = list(reversed(branches[branch]))
1285 1282 if start is not None:
1286 1283 # filter out the heads that cannot be reached from startrev
1287 1284 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1288 1285 bheads = [h for h in bheads if h in fbheads]
1289 1286 if not closed:
1290 1287 bheads = [h for h in bheads if
1291 1288 ('close' not in self.changelog.read(h)[5])]
1292 1289 return bheads
1293 1290
1294 1291 def branches(self, nodes):
1295 1292 if not nodes:
1296 1293 nodes = [self.changelog.tip()]
1297 1294 b = []
1298 1295 for n in nodes:
1299 1296 t = n
1300 1297 while 1:
1301 1298 p = self.changelog.parents(n)
1302 1299 if p[1] != nullid or p[0] == nullid:
1303 1300 b.append((t, n, p[0], p[1]))
1304 1301 break
1305 1302 n = p[0]
1306 1303 return b
1307 1304
1308 1305 def between(self, pairs):
1309 1306 r = []
1310 1307
1311 1308 for top, bottom in pairs:
1312 1309 n, l, i = top, [], 0
1313 1310 f = 1
1314 1311
1315 1312 while n != bottom and n != nullid:
1316 1313 p = self.changelog.parents(n)[0]
1317 1314 if i == f:
1318 1315 l.append(n)
1319 1316 f = f * 2
1320 1317 n = p
1321 1318 i += 1
1322 1319
1323 1320 r.append(l)
1324 1321
1325 1322 return r
1326 1323
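
between() above samples each top-to-bottom path at exponentially growing distances: the step counter i is compared against f, which doubles on every hit, so the nodes 1, 2, 4, 8, ... parents below the top are recorded. A worked example on a hypothetical linear history n0 (top) -> n1 -> ... -> n10 (bottom):

    result = repo.between([(n0, n10)])
    # the sampled nodes sit 1, 2, 4 and 8 steps below the top:
    assert result == [[n1, n2, n4, n8]]

This keeps the number of samples logarithmic in the length of the path, which is what the legacy discovery protocol's 'between' query relies on.
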
1327 1324 def pull(self, remote, heads=None, force=False):
1328 1325 lock = self.lock()
1329 1326 try:
1330 1327 usecommon = remote.capable('getbundle')
1331 1328 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1332 1329 force=force, commononly=usecommon)
1333 1330 common, fetch, rheads = tmp
1334 1331 if not fetch:
1335 1332 self.ui.status(_("no changes found\n"))
1336 1333 result = 0
1337 1334 else:
1338 1335 if heads is None and list(common) == [nullid]:
1339 1336 self.ui.status(_("requesting all changes\n"))
1340 1337 elif heads is None and remote.capable('changegroupsubset'):
1341 1338 # issue1320, avoid a race if remote changed after discovery
1342 1339 heads = rheads
1343 1340
1344 1341 if usecommon:
1345 1342 cg = remote.getbundle('pull', common=common,
1346 1343 heads=heads or rheads)
1347 1344 elif heads is None:
1348 1345 cg = remote.changegroup(fetch, 'pull')
1349 1346 elif not remote.capable('changegroupsubset'):
1350 1347 raise util.Abort(_("partial pull cannot be done because "
1351 1348 "other repository doesn't support "
1352 1349 "changegroupsubset."))
1353 1350 else:
1354 1351 cg = remote.changegroupsubset(fetch, heads, 'pull')
1355 1352 result = self.addchangegroup(cg, 'pull', remote.url(),
1356 1353 lock=lock)
1357 1354 finally:
1358 1355 lock.release()
1359 1356
1360 1357 return result
1361 1358
1362 1359 def checkpush(self, force, revs):
1363 1360 """Extensions can override this function if additional checks have
1364 1361 to be performed before pushing, or call it if they override push
1365 1362 command.
1366 1363 """
1367 1364 pass
1368 1365
1369 1366 def push(self, remote, force=False, revs=None, newbranch=False):
1370 1367 '''Push outgoing changesets (limited by revs) from the current
1371 1368 repository to remote. Return an integer:
1372 1369 - 0 means HTTP error *or* nothing to push
1373 1370 - 1 means we pushed and remote head count is unchanged *or*
1374 1371 we have outgoing changesets but refused to push
1375 1372 - other values as described by addchangegroup()
1376 1373 '''
1377 1374 # there are two ways to push to remote repo:
1378 1375 #
1379 1376 # addchangegroup assumes local user can lock remote
1380 1377 # repo (local filesystem, old ssh servers).
1381 1378 #
1382 1379 # unbundle assumes local user cannot lock remote repo (new ssh
1383 1380 # servers, http servers).
1384 1381
1385 1382 self.checkpush(force, revs)
1386 1383 lock = None
1387 1384 unbundle = remote.capable('unbundle')
1388 1385 if not unbundle:
1389 1386 lock = remote.lock()
1390 1387 try:
1391 1388 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1392 1389 newbranch)
1393 1390 ret = remote_heads
1394 1391 if cg is not None:
1395 1392 if unbundle:
1396 1393 # local repo finds heads on server, finds out what
1397 1394 # revs it must push. once revs transferred, if server
1398 1395 # finds it has different heads (someone else won
1399 1396 # commit/push race), server aborts.
1400 1397 if force:
1401 1398 remote_heads = ['force']
1402 1399 # ssh: return remote's addchangegroup()
1403 1400 # http: return remote's addchangegroup() or 0 for error
1404 1401 ret = remote.unbundle(cg, remote_heads, 'push')
1405 1402 else:
1406 1403 # we return an integer indicating remote head count change
1407 1404 ret = remote.addchangegroup(cg, 'push', self.url(),
1408 1405 lock=lock)
1409 1406 finally:
1410 1407 if lock is not None:
1411 1408 lock.release()
1412 1409
1413 1410 self.ui.debug("checking for updated bookmarks\n")
1414 1411 rb = remote.listkeys('bookmarks')
1415 1412 for k in rb.keys():
1416 1413 if k in self._bookmarks:
1417 1414 nr, nl = rb[k], hex(self._bookmarks[k])
1418 1415 if nr in self:
1419 1416 cr = self[nr]
1420 1417 cl = self[nl]
1421 1418 if cl in cr.descendants():
1422 1419 r = remote.pushkey('bookmarks', k, nr, nl)
1423 1420 if r:
1424 1421 self.ui.status(_("updating bookmark %s\n") % k)
1425 1422 else:
1426 1423 self.ui.warn(_('updating bookmark %s'
1427 1424 ' failed!\n') % k)
1428 1425
1429 1426 return ret
1430 1427
1431 1428 def changegroupinfo(self, nodes, source):
1432 1429 if self.ui.verbose or source == 'bundle':
1433 1430 self.ui.status(_("%d changesets found\n") % len(nodes))
1434 1431 if self.ui.debugflag:
1435 1432 self.ui.debug("list of changesets:\n")
1436 1433 for node in nodes:
1437 1434 self.ui.debug("%s\n" % hex(node))
1438 1435
1439 1436 def changegroupsubset(self, bases, heads, source):
1440 1437 """Compute a changegroup consisting of all the nodes that are
1441 1438 descendants of any of the bases and ancestors of any of the heads.
1442 1439 Return a chunkbuffer object whose read() method will return
1443 1440 successive changegroup chunks.
1444 1441
1445 1442 It is fairly complex as determining which filenodes and which
1446 1443 manifest nodes need to be included for the changeset to be complete
1447 1444 is non-trivial.
1448 1445
1449 1446 Another wrinkle is doing the reverse, figuring out which changeset in
1450 1447 the changegroup a particular filenode or manifestnode belongs to.
1451 1448 """
1452 1449 cl = self.changelog
1453 1450 if not bases:
1454 1451 bases = [nullid]
1455 1452 csets, bases, heads = cl.nodesbetween(bases, heads)
1456 1453 # We assume that all ancestors of bases are known
1457 1454 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1458 1455 return self._changegroupsubset(common, csets, heads, source)
1459 1456
1460 1457 def getbundle(self, source, heads=None, common=None):
1461 1458 """Like changegroupsubset, but returns the set difference between the
1462 1459 ancestors of heads and the ancestors of common.
1463 1460
1464 1461 If heads is None, use the local heads. If common is None, use [nullid].
1465 1462
1466 1463 The nodes in common might not all be known locally due to the way the
1467 1464 current discovery protocol works.
1468 1465 """
1469 1466 cl = self.changelog
1470 1467 if common:
1471 1468 nm = cl.nodemap
1472 1469 common = [n for n in common if n in nm]
1473 1470 else:
1474 1471 common = [nullid]
1475 1472 if not heads:
1476 1473 heads = cl.heads()
1477 1474 common, missing = cl.findcommonmissing(common, heads)
1478 1475 return self._changegroupsubset(common, missing, heads, source)
1479 1476
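
getbundle() above returns everything reachable from heads that is not reachable from common; unknown nodes in common are dropped first, so a remote may advertise bases this repository has never seen without breaking the call. The two call shapes, sketched with hypothetical values:

    # everything: defaults are all local heads vs. [nullid]
    bundle = repo.getbundle('pull')

    # incremental: only changesets above the shared base
    bundle = repo.getbundle('pull',
                            heads=[remote_tip_node],
                            common=[shared_base_node])
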
1480 1477 def _changegroupsubset(self, commonrevs, csets, heads, source):
1481 1478
1482 1479 cl = self.changelog
1483 1480 mf = self.manifest
1484 1481 mfs = {} # needed manifests
1485 1482 fnodes = {} # needed file nodes
1486 1483 changedfiles = set()
1487 1484 fstate = ['', {}]
1488 1485 count = [0]
1489 1486
1490 1487 # can we go through the fast path ?
1491 1488 heads.sort()
1492 1489 if heads == sorted(self.heads()):
1493 1490 return self._changegroup(csets, source)
1494 1491
1495 1492 # slow path
1496 1493 self.hook('preoutgoing', throw=True, source=source)
1497 1494 self.changegroupinfo(csets, source)
1498 1495
1499 1496 # filter any nodes that claim to be part of the known set
1500 1497 def prune(revlog, missing):
1501 1498 for n in missing:
1502 1499 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1503 1500 yield n
1504 1501
1505 1502 def lookup(revlog, x):
1506 1503 if revlog == cl:
1507 1504 c = cl.read(x)
1508 1505 changedfiles.update(c[3])
1509 1506 mfs.setdefault(c[0], x)
1510 1507 count[0] += 1
1511 1508 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1512 1509 return x
1513 1510 elif revlog == mf:
1514 1511 clnode = mfs[x]
1515 1512 mdata = mf.readfast(x)
1516 1513 for f in changedfiles:
1517 1514 if f in mdata:
1518 1515 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1519 1516 count[0] += 1
1520 1517 self.ui.progress(_('bundling'), count[0],
1521 1518 unit=_('manifests'), total=len(mfs))
1522 1519 return mfs[x]
1523 1520 else:
1524 1521 self.ui.progress(
1525 1522 _('bundling'), count[0], item=fstate[0],
1526 1523 unit=_('files'), total=len(changedfiles))
1527 1524 return fstate[1][x]
1528 1525
1529 1526 bundler = changegroup.bundle10(lookup)
1530 1527
1531 1528 def gengroup():
1532 1529 # Create a changenode group generator that will call our functions
1533 1530 # back to lookup the owning changenode and collect information.
1534 1531 for chunk in cl.group(csets, bundler):
1535 1532 yield chunk
1536 1533 self.ui.progress(_('bundling'), None)
1537 1534
1538 1535 # Create a generator for the manifestnodes that calls our lookup
1539 1536 # and data collection functions back.
1540 1537 count[0] = 0
1541 1538 for chunk in mf.group(prune(mf, mfs), bundler):
1542 1539 yield chunk
1543 1540 self.ui.progress(_('bundling'), None)
1544 1541
1545 1542 mfs.clear()
1546 1543
1547 1544 # Go through all our files in order sorted by name.
1548 1545 count[0] = 0
1549 1546 for fname in sorted(changedfiles):
1550 1547 filerevlog = self.file(fname)
1551 1548 if not len(filerevlog):
1552 1549 raise util.Abort(_("empty or missing revlog for %s") % fname)
1553 1550 fstate[0] = fname
1554 1551 fstate[1] = fnodes.pop(fname, {})
1555 1552 first = True
1556 1553
1557 1554 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1558 1555 bundler):
1559 1556 if first:
1560 1557 if chunk == bundler.close():
1561 1558 break
1562 1559 count[0] += 1
1563 1560 yield bundler.fileheader(fname)
1564 1561 first = False
1565 1562 yield chunk
1566 1563 # Signal that no more groups are left.
1567 1564 yield bundler.close()
1568 1565 self.ui.progress(_('bundling'), None)
1569 1566
1570 1567 if csets:
1571 1568 self.hook('outgoing', node=hex(csets[0]), source=source)
1572 1569
1573 1570 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1574 1571
1575 1572 def changegroup(self, basenodes, source):
1576 1573 # to avoid a race we use changegroupsubset() (issue1320)
1577 1574 return self.changegroupsubset(basenodes, self.heads(), source)
1578 1575
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function, as we can assume that
        the recipient already has any changenode we aren't sending.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

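        # walk a revlog and yield only the nodes introduced by one of the
        # outgoing changesets, i.e. those whose linkrev falls inside revset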
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''

            # changedfiles is filled in by lookup() as the changelog
            # group below is generated
            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
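        # illustrative arithmetic: growing the repo from one head to three
        # returns 3 - 1 + 1 = 3, while merging two heads into one returns
        # 1 - 2 - 1 = -2 (see the return statements at the end)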
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers
        # will not see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
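            # progress callback handed to the unbundler through
            # source.callback below; it is invoked once per incoming chunk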
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

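            # each pass pulls one per-file revlog group off the stream;
            # an empty file-name chunk signals the end of the file data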
            while True:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote, requirements):
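        # wire format consumed below: a status line (0 ok, 1 operation
        # forbidden, 2 remote locking failed), then a line with
        # "<total_files> <total_bytes>", then for each file a line of
        # "<name>\0<size>" followed by <size> bytes of raw store data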
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)
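    # illustrative: with the standard 'bookmarks' pushkey namespace,
    # repo.listkeys('bookmarks') returns a {name: hex node} map and
    # repo.pushkey('bookmarks', name, oldhex, newhex) moves one bookmark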

    def debugwireargs(self, one, two, three=None, four=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s" % (one, two, three, four)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
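
# illustrative use, with example file names: the transaction machinery
# passes rename pairs such as ('journal', 'undo'), so the callback
# returned here moves the journal files into place as undo files once
# the transaction is safely closed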

def instance(ui, path, create):
    return localrepository(ui, urlmod.localpath(path), create)

def islocal(path):
    return True