localrepo: do not modify ctx.removed() list in-place
Patrick Mezard
r12899:fabe6141 stable
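The one-line fix below (line 984) copies the list returned by ctx.removed() before
commitctx() mutates it. Later in that function, removed.append(f) records files
that failed to commit with ENOENT, and without the copy that append would also
alter the list cached inside the context object. Here is a minimal sketch of the
aliasing problem, using a hypothetical Ctx class rather than Mercurial's actual
workingctx/changectx types:

    class Ctx(object):
        """Hypothetical stand-in for a change context that hands out
        its internally cached 'removed' list by reference."""
        def __init__(self, removed):
            self._removed = removed

        def removed(self):
            return self._removed          # no defensive copy

    ctx = Ctx(['a.txt'])

    # Buggy pattern: 'removed' aliases the cached list, so appending
    # during the commit also mutates the context's own state.
    removed = ctx.removed()
    removed.append('b.txt')
    assert ctx.removed() == ['a.txt', 'b.txt']   # cache corrupted

    # Fixed pattern (this changeset): copy before mutating.
    ctx = Ctx(['a.txt'])
    removed = list(ctx.removed())
    removed.append('b.txt')
    assert ctx.removed() == ['a.txt']            # cache intact

The later rebinding, removed = [f for f in sorted(removed) ...], was already safe
because a list comprehension builds a new list; only the in-place append needed
the defensive copy.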
@@ -1,1900 +1,1900 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supportedformats = set(('revlogv1', 'parentdelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=0):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = util.path_auditor(self.root, self._checknested)
34 34 self.opener = util.opener(self.path)
35 35 self.wopener = util.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 os.mkdir(self.path)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener("00changelog.i", "a").write(
60 60 '\0\0\0\2' # represents revlogv2
61 61 ' dummy changelog to prevent using the old repo layout'
62 62 )
63 63 if self.ui.configbool('format', 'parentdelta', False):
64 64 requirements.append("parentdelta")
65 65 else:
66 66 raise error.RepoError(_("repository %s not found") % path)
67 67 elif create:
68 68 raise error.RepoError(_("repository %s already exists") % path)
69 69 else:
70 70 # find requirements
71 71 requirements = set()
72 72 try:
73 73 requirements = set(self.opener("requires").read().splitlines())
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 for r in requirements - self.supported:
78 78 raise error.RepoError(_("requirement '%s' not supported") % r)
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener("sharedpath").read())
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None # in UTF-8
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118 if 'parentdelta' in requirements:
119 119 self.sopener.options['parentdelta'] = 1
120 120
121 121 def _writerequirements(self):
122 122 reqfile = self.opener("requires", "w")
123 123 for r in self.requirements:
124 124 reqfile.write("%s\n" % r)
125 125 reqfile.close()
126 126
127 127 def _checknested(self, path):
128 128 """Determine if path is a legal nested repository."""
129 129 if not path.startswith(self.root):
130 130 return False
131 131 subpath = path[len(self.root) + 1:]
132 132
133 133 # XXX: Checking against the current working copy is wrong in
134 134 # the sense that it can reject things like
135 135 #
136 136 # $ hg cat -r 10 sub/x.txt
137 137 #
138 138 # if sub/ is no longer a subrepository in the working copy
139 139 # parent revision.
140 140 #
141 141 # However, it can of course also allow things that would have
142 142 # been rejected before, such as the above cat command if sub/
143 143 # is a subrepository now, but was a normal directory before.
144 144 # The old path auditor would have rejected by mistake since it
145 145 # panics when it sees sub/.hg/.
146 146 #
147 147 # All in all, checking against the working copy seems sensible
148 148 # since we want to prevent access to nested repositories on
149 149 # the filesystem *now*.
150 150 ctx = self[None]
151 151 parts = util.splitpath(subpath)
152 152 while parts:
153 153 prefix = os.sep.join(parts)
154 154 if prefix in ctx.substate:
155 155 if prefix == subpath:
156 156 return True
157 157 else:
158 158 sub = ctx.sub(prefix)
159 159 return sub.checknested(subpath[len(prefix) + 1:])
160 160 else:
161 161 parts.pop()
162 162 return False
163 163
164 164
165 165 @propertycache
166 166 def changelog(self):
167 167 c = changelog.changelog(self.sopener)
168 168 if 'HG_PENDING' in os.environ:
169 169 p = os.environ['HG_PENDING']
170 170 if p.startswith(self.root):
171 171 c.readpending('00changelog.i.a')
172 172 self.sopener.options['defversion'] = c.version
173 173 return c
174 174
175 175 @propertycache
176 176 def manifest(self):
177 177 return manifest.manifest(self.sopener)
178 178
179 179 @propertycache
180 180 def dirstate(self):
181 181 return dirstate.dirstate(self.opener, self.ui, self.root)
182 182
183 183 def __getitem__(self, changeid):
184 184 if changeid is None:
185 185 return context.workingctx(self)
186 186 return context.changectx(self, changeid)
187 187
188 188 def __contains__(self, changeid):
189 189 try:
190 190 return bool(self.lookup(changeid))
191 191 except error.RepoLookupError:
192 192 return False
193 193
194 194 def __nonzero__(self):
195 195 return True
196 196
197 197 def __len__(self):
198 198 return len(self.changelog)
199 199
200 200 def __iter__(self):
201 201 for i in xrange(len(self)):
202 202 yield i
203 203
204 204 def url(self):
205 205 return 'file:' + self.root
206 206
207 207 def hook(self, name, throw=False, **args):
208 208 return hook.hook(self.ui, self, name, throw, **args)
209 209
210 210 tag_disallowed = ':\r\n'
211 211
212 212 def _tag(self, names, node, message, local, user, date, extra={}):
213 213 if isinstance(names, str):
214 214 allchars = names
215 215 names = (names,)
216 216 else:
217 217 allchars = ''.join(names)
218 218 for c in self.tag_disallowed:
219 219 if c in allchars:
220 220 raise util.Abort(_('%r cannot be used in a tag name') % c)
221 221
222 222 branches = self.branchmap()
223 223 for name in names:
224 224 self.hook('pretag', throw=True, node=hex(node), tag=name,
225 225 local=local)
226 226 if name in branches:
227 227 self.ui.warn(_("warning: tag %s conflicts with existing"
228 228 " branch name\n") % name)
229 229
230 230 def writetags(fp, names, munge, prevtags):
231 231 fp.seek(0, 2)
232 232 if prevtags and prevtags[-1] != '\n':
233 233 fp.write('\n')
234 234 for name in names:
235 235 m = munge and munge(name) or name
236 236 if self._tagtypes and name in self._tagtypes:
237 237 old = self._tags.get(name, nullid)
238 238 fp.write('%s %s\n' % (hex(old), m))
239 239 fp.write('%s %s\n' % (hex(node), m))
240 240 fp.close()
241 241
242 242 prevtags = ''
243 243 if local:
244 244 try:
245 245 fp = self.opener('localtags', 'r+')
246 246 except IOError:
247 247 fp = self.opener('localtags', 'a')
248 248 else:
249 249 prevtags = fp.read()
250 250
251 251 # local tags are stored in the current charset
252 252 writetags(fp, names, None, prevtags)
253 253 for name in names:
254 254 self.hook('tag', node=hex(node), tag=name, local=local)
255 255 return
256 256
257 257 try:
258 258 fp = self.wfile('.hgtags', 'rb+')
259 259 except IOError:
260 260 fp = self.wfile('.hgtags', 'ab')
261 261 else:
262 262 prevtags = fp.read()
263 263
264 264 # committed tags are stored in UTF-8
265 265 writetags(fp, names, encoding.fromlocal, prevtags)
266 266
267 267 if '.hgtags' not in self.dirstate:
268 268 self[None].add(['.hgtags'])
269 269
270 270 m = matchmod.exact(self.root, '', ['.hgtags'])
271 271 tagnode = self.commit(message, user, date, extra=extra, match=m)
272 272
273 273 for name in names:
274 274 self.hook('tag', node=hex(node), tag=name, local=local)
275 275
276 276 return tagnode
277 277
278 278 def tag(self, names, node, message, local, user, date):
279 279 '''tag a revision with one or more symbolic names.
280 280
281 281 names is a list of strings or, when adding a single tag, names may be a
282 282 string.
283 283
284 284 if local is True, the tags are stored in a per-repository file.
285 285 otherwise, they are stored in the .hgtags file, and a new
286 286 changeset is committed with the change.
287 287
288 288 keyword arguments:
289 289
290 290 local: whether to store tags in non-version-controlled file
291 291 (default False)
292 292
293 293 message: commit message to use if committing
294 294
295 295 user: name of user to use if committing
296 296
297 297 date: date tuple to use if committing'''
298 298
299 299 for x in self.status()[:5]:
300 300 if '.hgtags' in x:
301 301 raise util.Abort(_('working copy of .hgtags is changed '
302 302 '(please commit .hgtags manually)'))
303 303
304 304 self.tags() # instantiate the cache
305 305 self._tag(names, node, message, local, user, date)
306 306
307 307 def tags(self):
308 308 '''return a mapping of tag to node'''
309 309 if self._tags is None:
310 310 (self._tags, self._tagtypes) = self._findtags()
311 311
312 312 return self._tags
313 313
314 314 def _findtags(self):
315 315 '''Do the hard work of finding tags. Return a pair of dicts
316 316 (tags, tagtypes) where tags maps tag name to node, and tagtypes
317 317 maps tag name to a string like \'global\' or \'local\'.
318 318 Subclasses or extensions are free to add their own tags, but
319 319 should be aware that the returned dicts will be retained for the
320 320 duration of the localrepo object.'''
321 321
322 322 # XXX what tagtype should subclasses/extensions use? Currently
323 323 # mq and bookmarks add tags, but do not set the tagtype at all.
324 324 # Should each extension invent its own tag type? Should there
325 325 # be one tagtype for all such "virtual" tags? Or is the status
326 326 # quo fine?
327 327
328 328 alltags = {} # map tag name to (node, hist)
329 329 tagtypes = {}
330 330
331 331 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
332 332 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
333 333
334 334 # Build the return dicts. Have to re-encode tag names because
335 335 # the tags module always uses UTF-8 (in order not to lose info
336 336 # writing to the cache), but the rest of Mercurial wants them in
337 337 # local encoding.
338 338 tags = {}
339 339 for (name, (node, hist)) in alltags.iteritems():
340 340 if node != nullid:
341 341 tags[encoding.tolocal(name)] = node
342 342 tags['tip'] = self.changelog.tip()
343 343 tagtypes = dict([(encoding.tolocal(name), value)
344 344 for (name, value) in tagtypes.iteritems()])
345 345 return (tags, tagtypes)
346 346
347 347 def tagtype(self, tagname):
348 348 '''
349 349 return the type of the given tag. result can be:
350 350
351 351 'local' : a local tag
352 352 'global' : a global tag
353 353 None : tag does not exist
354 354 '''
355 355
356 356 self.tags()
357 357
358 358 return self._tagtypes.get(tagname)
359 359
360 360 def tagslist(self):
361 361 '''return a list of tags ordered by revision'''
362 362 l = []
363 363 for t, n in self.tags().iteritems():
364 364 try:
365 365 r = self.changelog.rev(n)
366 366 except:
367 367 r = -2 # sort to the beginning of the list if unknown
368 368 l.append((r, t, n))
369 369 return [(t, n) for r, t, n in sorted(l)]
370 370
371 371 def nodetags(self, node):
372 372 '''return the tags associated with a node'''
373 373 if not self.nodetagscache:
374 374 self.nodetagscache = {}
375 375 for t, n in self.tags().iteritems():
376 376 self.nodetagscache.setdefault(n, []).append(t)
377 377 for tags in self.nodetagscache.itervalues():
378 378 tags.sort()
379 379 return self.nodetagscache.get(node, [])
380 380
381 381 def _branchtags(self, partial, lrev):
382 382 # TODO: rename this function?
383 383 tiprev = len(self) - 1
384 384 if lrev != tiprev:
385 385 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
386 386 self._updatebranchcache(partial, ctxgen)
387 387 self._writebranchcache(partial, self.changelog.tip(), tiprev)
388 388
389 389 return partial
390 390
391 391 def updatebranchcache(self):
392 392 tip = self.changelog.tip()
393 393 if self._branchcache is not None and self._branchcachetip == tip:
394 394 return self._branchcache
395 395
396 396 oldtip = self._branchcachetip
397 397 self._branchcachetip = tip
398 398 if oldtip is None or oldtip not in self.changelog.nodemap:
399 399 partial, last, lrev = self._readbranchcache()
400 400 else:
401 401 lrev = self.changelog.rev(oldtip)
402 402 partial = self._branchcache
403 403
404 404 self._branchtags(partial, lrev)
405 405 # this private cache holds all heads (not just tips)
406 406 self._branchcache = partial
407 407
408 408 def branchmap(self):
409 409 '''returns a dictionary {branch: [branchheads]}'''
410 410 self.updatebranchcache()
411 411 return self._branchcache
412 412
413 413 def branchtags(self):
414 414 '''return a dict where branch names map to the tipmost head of
415 415 the branch, open heads come before closed'''
416 416 bt = {}
417 417 for bn, heads in self.branchmap().iteritems():
418 418 tip = heads[-1]
419 419 for h in reversed(heads):
420 420 if 'close' not in self.changelog.read(h)[5]:
421 421 tip = h
422 422 break
423 423 bt[bn] = tip
424 424 return bt
425 425
426 426
427 427 def _readbranchcache(self):
428 428 partial = {}
429 429 try:
430 430 f = self.opener("branchheads.cache")
431 431 lines = f.read().split('\n')
432 432 f.close()
433 433 except (IOError, OSError):
434 434 return {}, nullid, nullrev
435 435
436 436 try:
437 437 last, lrev = lines.pop(0).split(" ", 1)
438 438 last, lrev = bin(last), int(lrev)
439 439 if lrev >= len(self) or self[lrev].node() != last:
440 440 # invalidate the cache
441 441 raise ValueError('invalidating branch cache (tip differs)')
442 442 for l in lines:
443 443 if not l:
444 444 continue
445 445 node, label = l.split(" ", 1)
446 446 partial.setdefault(label.strip(), []).append(bin(node))
447 447 except KeyboardInterrupt:
448 448 raise
449 449 except Exception, inst:
450 450 if self.ui.debugflag:
451 451 self.ui.warn(str(inst), '\n')
452 452 partial, last, lrev = {}, nullid, nullrev
453 453 return partial, last, lrev
454 454
455 455 def _writebranchcache(self, branches, tip, tiprev):
456 456 try:
457 457 f = self.opener("branchheads.cache", "w", atomictemp=True)
458 458 f.write("%s %s\n" % (hex(tip), tiprev))
459 459 for label, nodes in branches.iteritems():
460 460 for node in nodes:
461 461 f.write("%s %s\n" % (hex(node), label))
462 462 f.rename()
463 463 except (IOError, OSError):
464 464 pass
465 465
466 466 def _updatebranchcache(self, partial, ctxgen):
467 467 # collect new branch entries
468 468 newbranches = {}
469 469 for c in ctxgen:
470 470 newbranches.setdefault(c.branch(), []).append(c.node())
471 471 # if older branchheads are reachable from new ones, they aren't
472 472 # really branchheads. Note checking parents is insufficient:
473 473 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
474 474 for branch, newnodes in newbranches.iteritems():
475 475 bheads = partial.setdefault(branch, [])
476 476 bheads.extend(newnodes)
477 477 if len(bheads) <= 1:
478 478 continue
479 479 # starting from tip means fewer passes over reachable
480 480 while newnodes:
481 481 latest = newnodes.pop()
482 482 if latest not in bheads:
483 483 continue
484 484 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
485 485 reachable = self.changelog.reachable(latest, minbhrev)
486 486 reachable.remove(latest)
487 487 bheads = [b for b in bheads if b not in reachable]
488 488 partial[branch] = bheads
489 489
490 490 def lookup(self, key):
491 491 if isinstance(key, int):
492 492 return self.changelog.node(key)
493 493 elif key == '.':
494 494 return self.dirstate.parents()[0]
495 495 elif key == 'null':
496 496 return nullid
497 497 elif key == 'tip':
498 498 return self.changelog.tip()
499 499 n = self.changelog._match(key)
500 500 if n:
501 501 return n
502 502 if key in self.tags():
503 503 return self.tags()[key]
504 504 if key in self.branchtags():
505 505 return self.branchtags()[key]
506 506 n = self.changelog._partialmatch(key)
507 507 if n:
508 508 return n
509 509
510 510 # can't find key, check if it might have come from damaged dirstate
511 511 if key in self.dirstate.parents():
512 512 raise error.Abort(_("working directory has unknown parent '%s'!")
513 513 % short(key))
514 514 try:
515 515 if len(key) == 20:
516 516 key = hex(key)
517 517 except:
518 518 pass
519 519 raise error.RepoLookupError(_("unknown revision '%s'") % key)
520 520
521 521 def lookupbranch(self, key, remote=None):
522 522 repo = remote or self
523 523 if key in repo.branchmap():
524 524 return key
525 525
526 526 repo = (remote and remote.local()) and remote or self
527 527 return repo[key].branch()
528 528
529 529 def local(self):
530 530 return True
531 531
532 532 def join(self, f):
533 533 return os.path.join(self.path, f)
534 534
535 535 def wjoin(self, f):
536 536 return os.path.join(self.root, f)
537 537
538 538 def file(self, f):
539 539 if f[0] == '/':
540 540 f = f[1:]
541 541 return filelog.filelog(self.sopener, f)
542 542
543 543 def changectx(self, changeid):
544 544 return self[changeid]
545 545
546 546 def parents(self, changeid=None):
547 547 '''get list of changectxs for parents of changeid'''
548 548 return self[changeid].parents()
549 549
550 550 def filectx(self, path, changeid=None, fileid=None):
551 551 """changeid can be a changeset revision, node, or tag.
552 552 fileid can be a file revision or node."""
553 553 return context.filectx(self, path, changeid, fileid)
554 554
555 555 def getcwd(self):
556 556 return self.dirstate.getcwd()
557 557
558 558 def pathto(self, f, cwd=None):
559 559 return self.dirstate.pathto(f, cwd)
560 560
561 561 def wfile(self, f, mode='r'):
562 562 return self.wopener(f, mode)
563 563
564 564 def _link(self, f):
565 565 return os.path.islink(self.wjoin(f))
566 566
567 567 def _loadfilter(self, filter):
568 568 if filter not in self.filterpats:
569 569 l = []
570 570 for pat, cmd in self.ui.configitems(filter):
571 571 if cmd == '!':
572 572 continue
573 573 mf = matchmod.match(self.root, '', [pat])
574 574 fn = None
575 575 params = cmd
576 576 for name, filterfn in self._datafilters.iteritems():
577 577 if cmd.startswith(name):
578 578 fn = filterfn
579 579 params = cmd[len(name):].lstrip()
580 580 break
581 581 if not fn:
582 582 fn = lambda s, c, **kwargs: util.filter(s, c)
583 583 # Wrap old filters not supporting keyword arguments
584 584 if not inspect.getargspec(fn)[2]:
585 585 oldfn = fn
586 586 fn = lambda s, c, **kwargs: oldfn(s, c)
587 587 l.append((mf, fn, params))
588 588 self.filterpats[filter] = l
589 589 return self.filterpats[filter]
590 590
591 591 def _filter(self, filterpats, filename, data):
592 592 for mf, fn, cmd in filterpats:
593 593 if mf(filename):
594 594 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
595 595 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
596 596 break
597 597
598 598 return data
599 599
600 600 @propertycache
601 601 def _encodefilterpats(self):
602 602 return self._loadfilter('encode')
603 603
604 604 @propertycache
605 605 def _decodefilterpats(self):
606 606 return self._loadfilter('decode')
607 607
608 608 def adddatafilter(self, name, filter):
609 609 self._datafilters[name] = filter
610 610
611 611 def wread(self, filename):
612 612 if self._link(filename):
613 613 data = os.readlink(self.wjoin(filename))
614 614 else:
615 615 data = self.wopener(filename, 'r').read()
616 616 return self._filter(self._encodefilterpats, filename, data)
617 617
618 618 def wwrite(self, filename, data, flags):
619 619 data = self._filter(self._decodefilterpats, filename, data)
620 620 try:
621 621 os.unlink(self.wjoin(filename))
622 622 except OSError:
623 623 pass
624 624 if 'l' in flags:
625 625 self.wopener.symlink(data, filename)
626 626 else:
627 627 self.wopener(filename, 'w').write(data)
628 628 if 'x' in flags:
629 629 util.set_flags(self.wjoin(filename), False, True)
630 630
631 631 def wwritedata(self, filename, data):
632 632 return self._filter(self._decodefilterpats, filename, data)
633 633
634 634 def transaction(self, desc):
635 635 tr = self._transref and self._transref() or None
636 636 if tr and tr.running():
637 637 return tr.nest()
638 638
639 639 # abort here if the journal already exists
640 640 if os.path.exists(self.sjoin("journal")):
641 641 raise error.RepoError(
642 642 _("abandoned transaction found - run hg recover"))
643 643
644 644 # save dirstate for rollback
645 645 try:
646 646 ds = self.opener("dirstate").read()
647 647 except IOError:
648 648 ds = ""
649 649 self.opener("journal.dirstate", "w").write(ds)
650 650 self.opener("journal.branch", "w").write(self.dirstate.branch())
651 651 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
652 652
653 653 renames = [(self.sjoin("journal"), self.sjoin("undo")),
654 654 (self.join("journal.dirstate"), self.join("undo.dirstate")),
655 655 (self.join("journal.branch"), self.join("undo.branch")),
656 656 (self.join("journal.desc"), self.join("undo.desc"))]
657 657 tr = transaction.transaction(self.ui.warn, self.sopener,
658 658 self.sjoin("journal"),
659 659 aftertrans(renames),
660 660 self.store.createmode)
661 661 self._transref = weakref.ref(tr)
662 662 return tr
663 663
664 664 def recover(self):
665 665 lock = self.lock()
666 666 try:
667 667 if os.path.exists(self.sjoin("journal")):
668 668 self.ui.status(_("rolling back interrupted transaction\n"))
669 669 transaction.rollback(self.sopener, self.sjoin("journal"),
670 670 self.ui.warn)
671 671 self.invalidate()
672 672 return True
673 673 else:
674 674 self.ui.warn(_("no interrupted transaction available\n"))
675 675 return False
676 676 finally:
677 677 lock.release()
678 678
679 679 def rollback(self, dryrun=False):
680 680 wlock = lock = None
681 681 try:
682 682 wlock = self.wlock()
683 683 lock = self.lock()
684 684 if os.path.exists(self.sjoin("undo")):
685 685 try:
686 686 args = self.opener("undo.desc", "r").read().splitlines()
687 687 if len(args) >= 3 and self.ui.verbose:
688 688 desc = _("rolling back to revision %s"
689 689 " (undo %s: %s)\n") % (
690 690 int(args[0]) - 1, args[1], args[2])
691 691 elif len(args) >= 2:
692 692 desc = _("rolling back to revision %s (undo %s)\n") % (
693 693 int(args[0]) - 1, args[1])
694 694 except IOError:
695 695 desc = _("rolling back unknown transaction\n")
696 696 self.ui.status(desc)
697 697 if dryrun:
698 698 return
699 699 transaction.rollback(self.sopener, self.sjoin("undo"),
700 700 self.ui.warn)
701 701 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
702 702 try:
703 703 branch = self.opener("undo.branch").read()
704 704 self.dirstate.setbranch(branch)
705 705 except IOError:
706 706 self.ui.warn(_("Named branch could not be reset, "
707 707 "current branch still is: %s\n")
708 708 % encoding.tolocal(self.dirstate.branch()))
709 709 self.invalidate()
710 710 self.dirstate.invalidate()
711 711 self.destroyed()
712 712 else:
713 713 self.ui.warn(_("no rollback information available\n"))
714 714 return 1
715 715 finally:
716 716 release(lock, wlock)
717 717
718 718 def invalidatecaches(self):
719 719 self._tags = None
720 720 self._tagtypes = None
721 721 self.nodetagscache = None
722 722 self._branchcache = None # in UTF-8
723 723 self._branchcachetip = None
724 724
725 725 def invalidate(self):
726 726 for a in "changelog manifest".split():
727 727 if a in self.__dict__:
728 728 delattr(self, a)
729 729 self.invalidatecaches()
730 730
731 731 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
732 732 try:
733 733 l = lock.lock(lockname, 0, releasefn, desc=desc)
734 734 except error.LockHeld, inst:
735 735 if not wait:
736 736 raise
737 737 self.ui.warn(_("waiting for lock on %s held by %r\n") %
738 738 (desc, inst.locker))
739 739 # default to 600 seconds timeout
740 740 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
741 741 releasefn, desc=desc)
742 742 if acquirefn:
743 743 acquirefn()
744 744 return l
745 745
746 746 def lock(self, wait=True):
747 747 '''Lock the repository store (.hg/store) and return a weak reference
748 748 to the lock. Use this before modifying the store (e.g. committing or
749 749 stripping). If you are opening a transaction, get a lock as well.'''
750 750 l = self._lockref and self._lockref()
751 751 if l is not None and l.held:
752 752 l.lock()
753 753 return l
754 754
755 755 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
756 756 _('repository %s') % self.origroot)
757 757 self._lockref = weakref.ref(l)
758 758 return l
759 759
760 760 def wlock(self, wait=True):
761 761 '''Lock the non-store parts of the repository (everything under
762 762 .hg except .hg/store) and return a weak reference to the lock.
763 763 Use this before modifying files in .hg.'''
764 764 l = self._wlockref and self._wlockref()
765 765 if l is not None and l.held:
766 766 l.lock()
767 767 return l
768 768
769 769 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
770 770 self.dirstate.invalidate, _('working directory of %s') %
771 771 self.origroot)
772 772 self._wlockref = weakref.ref(l)
773 773 return l
774 774
775 775 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
776 776 """
777 777 commit an individual file as part of a larger transaction
778 778 """
779 779
780 780 fname = fctx.path()
781 781 text = fctx.data()
782 782 flog = self.file(fname)
783 783 fparent1 = manifest1.get(fname, nullid)
784 784 fparent2 = fparent2o = manifest2.get(fname, nullid)
785 785
786 786 meta = {}
787 787 copy = fctx.renamed()
788 788 if copy and copy[0] != fname:
789 789 # Mark the new revision of this file as a copy of another
790 790 # file. This copy data will effectively act as a parent
791 791 # of this new revision. If this is a merge, the first
792 792 # parent will be the nullid (meaning "look up the copy data")
793 793 # and the second one will be the other parent. For example:
794 794 #
795 795 # 0 --- 1 --- 3 rev1 changes file foo
796 796 # \ / rev2 renames foo to bar and changes it
797 797 # \- 2 -/ rev3 should have bar with all changes and
798 798 # should record that bar descends from
799 799 # bar in rev2 and foo in rev1
800 800 #
801 801 # this allows this merge to succeed:
802 802 #
803 803 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
804 804 # \ / merging rev3 and rev4 should use bar@rev2
805 805 # \- 2 --- 4 as the merge base
806 806 #
807 807
808 808 cfname = copy[0]
809 809 crev = manifest1.get(cfname)
810 810 newfparent = fparent2
811 811
812 812 if manifest2: # branch merge
813 813 if fparent2 == nullid or crev is None: # copied on remote side
814 814 if cfname in manifest2:
815 815 crev = manifest2[cfname]
816 816 newfparent = fparent1
817 817
818 818 # find source in nearest ancestor if we've lost track
819 819 if not crev:
820 820 self.ui.debug(" %s: searching for copy revision for %s\n" %
821 821 (fname, cfname))
822 822 for ancestor in self['.'].ancestors():
823 823 if cfname in ancestor:
824 824 crev = ancestor[cfname].filenode()
825 825 break
826 826
827 827 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
828 828 meta["copy"] = cfname
829 829 meta["copyrev"] = hex(crev)
830 830 fparent1, fparent2 = nullid, newfparent
831 831 elif fparent2 != nullid:
832 832 # is one parent an ancestor of the other?
833 833 fparentancestor = flog.ancestor(fparent1, fparent2)
834 834 if fparentancestor == fparent1:
835 835 fparent1, fparent2 = fparent2, nullid
836 836 elif fparentancestor == fparent2:
837 837 fparent2 = nullid
838 838
839 839 # is the file changed?
840 840 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
841 841 changelist.append(fname)
842 842 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
843 843
844 844 # are just the flags changed during merge?
845 845 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
846 846 changelist.append(fname)
847 847
848 848 return fparent1
849 849
850 850 def commit(self, text="", user=None, date=None, match=None, force=False,
851 851 editor=False, extra={}):
852 852 """Add a new revision to current repository.
853 853
854 854 Revision information is gathered from the working directory,
855 855 match can be used to filter the committed files. If editor is
856 856 supplied, it is called to get a commit message.
857 857 """
858 858
859 859 def fail(f, msg):
860 860 raise util.Abort('%s: %s' % (f, msg))
861 861
862 862 if not match:
863 863 match = matchmod.always(self.root, '')
864 864
865 865 if not force:
866 866 vdirs = []
867 867 match.dir = vdirs.append
868 868 match.bad = fail
869 869
870 870 wlock = self.wlock()
871 871 try:
872 872 wctx = self[None]
873 873 merge = len(wctx.parents()) > 1
874 874
875 875 if (not force and merge and match and
876 876 (match.files() or match.anypats())):
877 877 raise util.Abort(_('cannot partially commit a merge '
878 878 '(do not specify files or patterns)'))
879 879
880 880 changes = self.status(match=match, clean=force)
881 881 if force:
882 882 changes[0].extend(changes[6]) # mq may commit unchanged files
883 883
884 884 # check subrepos
885 885 subs = []
886 886 removedsubs = set()
887 887 for p in wctx.parents():
888 888 removedsubs.update(s for s in p.substate if match(s))
889 889 for s in wctx.substate:
890 890 removedsubs.discard(s)
891 891 if match(s) and wctx.sub(s).dirty():
892 892 subs.append(s)
893 893 if (subs or removedsubs):
894 894 if (not match('.hgsub') and
895 895 '.hgsub' in (wctx.modified() + wctx.added())):
896 896 raise util.Abort(_("can't commit subrepos without .hgsub"))
897 897 if '.hgsubstate' not in changes[0]:
898 898 changes[0].insert(0, '.hgsubstate')
899 899
900 900 # make sure all explicit patterns are matched
901 901 if not force and match.files():
902 902 matched = set(changes[0] + changes[1] + changes[2])
903 903
904 904 for f in match.files():
905 905 if f == '.' or f in matched or f in wctx.substate:
906 906 continue
907 907 if f in changes[3]: # missing
908 908 fail(f, _('file not found!'))
909 909 if f in vdirs: # visited directory
910 910 d = f + '/'
911 911 for mf in matched:
912 912 if mf.startswith(d):
913 913 break
914 914 else:
915 915 fail(f, _("no match under directory!"))
916 916 elif f not in self.dirstate:
917 917 fail(f, _("file not tracked!"))
918 918
919 919 if (not force and not extra.get("close") and not merge
920 920 and not (changes[0] or changes[1] or changes[2])
921 921 and wctx.branch() == wctx.p1().branch()):
922 922 return None
923 923
924 924 ms = mergemod.mergestate(self)
925 925 for f in changes[0]:
926 926 if f in ms and ms[f] == 'u':
927 927 raise util.Abort(_("unresolved merge conflicts "
928 928 "(see hg resolve)"))
929 929
930 930 cctx = context.workingctx(self, text, user, date, extra, changes)
931 931 if editor:
932 932 cctx._text = editor(self, cctx, subs)
933 933 edited = (text != cctx._text)
934 934
935 935 # commit subs
936 936 if subs or removedsubs:
937 937 state = wctx.substate.copy()
938 938 for s in sorted(subs):
939 939 sub = wctx.sub(s)
940 940 self.ui.status(_('committing subrepository %s\n') %
941 941 subrepo.subrelpath(sub))
942 942 sr = sub.commit(cctx._text, user, date)
943 943 state[s] = (state[s][0], sr)
944 944 subrepo.writestate(self, state)
945 945
946 946 # Save commit message in case this transaction gets rolled back
947 947 # (e.g. by a pretxncommit hook). Leave the content alone on
948 948 # the assumption that the user will use the same editor again.
949 949 msgfile = self.opener('last-message.txt', 'wb')
950 950 msgfile.write(cctx._text)
951 951 msgfile.close()
952 952
953 953 p1, p2 = self.dirstate.parents()
954 954 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
955 955 try:
956 956 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
957 957 ret = self.commitctx(cctx, True)
958 958 except:
959 959 if edited:
960 960 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
961 961 self.ui.write(
962 962 _('note: commit message saved in %s\n') % msgfn)
963 963 raise
964 964
965 965 # update dirstate and mergestate
966 966 for f in changes[0] + changes[1]:
967 967 self.dirstate.normal(f)
968 968 for f in changes[2]:
969 969 self.dirstate.forget(f)
970 970 self.dirstate.setparents(ret)
971 971 ms.reset()
972 972 finally:
973 973 wlock.release()
974 974
975 975 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
976 976 return ret
977 977
978 978 def commitctx(self, ctx, error=False):
979 979 """Add a new revision to current repository.
980 980 Revision information is passed via the context argument.
981 981 """
982 982
983 983 tr = lock = None
984 removed = ctx.removed()
984 removed = list(ctx.removed())
985 985 p1, p2 = ctx.p1(), ctx.p2()
986 986 m1 = p1.manifest().copy()
987 987 m2 = p2.manifest()
988 988 user = ctx.user()
989 989
990 990 lock = self.lock()
991 991 try:
992 992 tr = self.transaction("commit")
993 993 trp = weakref.proxy(tr)
994 994
995 995 # check in files
996 996 new = {}
997 997 changed = []
998 998 linkrev = len(self)
999 999 for f in sorted(ctx.modified() + ctx.added()):
1000 1000 self.ui.note(f + "\n")
1001 1001 try:
1002 1002 fctx = ctx[f]
1003 1003 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1004 1004 changed)
1005 1005 m1.set(f, fctx.flags())
1006 1006 except OSError, inst:
1007 1007 self.ui.warn(_("trouble committing %s!\n") % f)
1008 1008 raise
1009 1009 except IOError, inst:
1010 1010 errcode = getattr(inst, 'errno', errno.ENOENT)
1011 1011 if error or errcode and errcode != errno.ENOENT:
1012 1012 self.ui.warn(_("trouble committing %s!\n") % f)
1013 1013 raise
1014 1014 else:
1015 1015 removed.append(f)
1016 1016
1017 1017 # update manifest
1018 1018 m1.update(new)
1019 1019 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1020 1020 drop = [f for f in removed if f in m1]
1021 1021 for f in drop:
1022 1022 del m1[f]
1023 1023 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1024 1024 p2.manifestnode(), (new, drop))
1025 1025
1026 1026 # update changelog
1027 1027 self.changelog.delayupdate()
1028 1028 n = self.changelog.add(mn, changed + removed, ctx.description(),
1029 1029 trp, p1.node(), p2.node(),
1030 1030 user, ctx.date(), ctx.extra().copy())
1031 1031 p = lambda: self.changelog.writepending() and self.root or ""
1032 1032 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1033 1033 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1034 1034 parent2=xp2, pending=p)
1035 1035 self.changelog.finalize(trp)
1036 1036 tr.close()
1037 1037
1038 1038 if self._branchcache:
1039 1039 self.updatebranchcache()
1040 1040 return n
1041 1041 finally:
1042 1042 if tr:
1043 1043 tr.release()
1044 1044 lock.release()
1045 1045
1046 1046 def destroyed(self):
1047 1047 '''Inform the repository that nodes have been destroyed.
1048 1048 Intended for use by strip and rollback, so there's a common
1049 1049 place for anything that has to be done after destroying history.'''
1050 1050 # XXX it might be nice if we could take the list of destroyed
1051 1051 # nodes, but I don't see an easy way for rollback() to do that
1052 1052
1053 1053 # Ensure the persistent tag cache is updated. Doing it now
1054 1054 # means that the tag cache only has to worry about destroyed
1055 1055 # heads immediately after a strip/rollback. That in turn
1056 1056 # guarantees that "cachetip == currenttip" (comparing both rev
1057 1057 # and node) always means no nodes have been added or destroyed.
1058 1058
1059 1059 # XXX this is suboptimal when qrefresh'ing: we strip the current
1060 1060 # head, refresh the tag cache, then immediately add a new head.
1061 1061 # But I think doing it this way is necessary for the "instant
1062 1062 # tag cache retrieval" case to work.
1063 1063 self.invalidatecaches()
1064 1064
1065 1065 def walk(self, match, node=None):
1066 1066 '''
1067 1067 walk recursively through the directory tree or a given
1068 1068 changeset, finding all files matched by the match
1069 1069 function
1070 1070 '''
1071 1071 return self[node].walk(match)
1072 1072
1073 1073 def status(self, node1='.', node2=None, match=None,
1074 1074 ignored=False, clean=False, unknown=False,
1075 1075 listsubrepos=False):
1076 1076 """return status of files between two nodes or node and working directory
1077 1077
1078 1078 If node1 is None, use the first dirstate parent instead.
1079 1079 If node2 is None, compare node1 with working directory.
1080 1080 """
1081 1081
1082 1082 def mfmatches(ctx):
1083 1083 mf = ctx.manifest().copy()
1084 1084 for fn in mf.keys():
1085 1085 if not match(fn):
1086 1086 del mf[fn]
1087 1087 return mf
1088 1088
1089 1089 if isinstance(node1, context.changectx):
1090 1090 ctx1 = node1
1091 1091 else:
1092 1092 ctx1 = self[node1]
1093 1093 if isinstance(node2, context.changectx):
1094 1094 ctx2 = node2
1095 1095 else:
1096 1096 ctx2 = self[node2]
1097 1097
1098 1098 working = ctx2.rev() is None
1099 1099 parentworking = working and ctx1 == self['.']
1100 1100 match = match or matchmod.always(self.root, self.getcwd())
1101 1101 listignored, listclean, listunknown = ignored, clean, unknown
1102 1102
1103 1103 # load earliest manifest first for caching reasons
1104 1104 if not working and ctx2.rev() < ctx1.rev():
1105 1105 ctx2.manifest()
1106 1106
1107 1107 if not parentworking:
1108 1108 def bad(f, msg):
1109 1109 if f not in ctx1:
1110 1110 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1111 1111 match.bad = bad
1112 1112
1113 1113 if working: # we need to scan the working dir
1114 1114 subrepos = []
1115 1115 if '.hgsub' in self.dirstate:
1116 1116 subrepos = ctx1.substate.keys()
1117 1117 s = self.dirstate.status(match, subrepos, listignored,
1118 1118 listclean, listunknown)
1119 1119 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1120 1120
1121 1121 # check for any possibly clean files
1122 1122 if parentworking and cmp:
1123 1123 fixup = []
1124 1124 # do a full compare of any files that might have changed
1125 1125 for f in sorted(cmp):
1126 1126 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1127 1127 or ctx1[f].cmp(ctx2[f])):
1128 1128 modified.append(f)
1129 1129 else:
1130 1130 fixup.append(f)
1131 1131
1132 1132 # update dirstate for files that are actually clean
1133 1133 if fixup:
1134 1134 if listclean:
1135 1135 clean += fixup
1136 1136
1137 1137 try:
1138 1138 # updating the dirstate is optional
1139 1139 # so we don't wait on the lock
1140 1140 wlock = self.wlock(False)
1141 1141 try:
1142 1142 for f in fixup:
1143 1143 self.dirstate.normal(f)
1144 1144 finally:
1145 1145 wlock.release()
1146 1146 except error.LockError:
1147 1147 pass
1148 1148
1149 1149 if not parentworking:
1150 1150 mf1 = mfmatches(ctx1)
1151 1151 if working:
1152 1152 # we are comparing working dir against non-parent
1153 1153 # generate a pseudo-manifest for the working dir
1154 1154 mf2 = mfmatches(self['.'])
1155 1155 for f in cmp + modified + added:
1156 1156 mf2[f] = None
1157 1157 mf2.set(f, ctx2.flags(f))
1158 1158 for f in removed:
1159 1159 if f in mf2:
1160 1160 del mf2[f]
1161 1161 else:
1162 1162 # we are comparing two revisions
1163 1163 deleted, unknown, ignored = [], [], []
1164 1164 mf2 = mfmatches(ctx2)
1165 1165
1166 1166 modified, added, clean = [], [], []
1167 1167 for fn in mf2:
1168 1168 if fn in mf1:
1169 1169 if (mf1.flags(fn) != mf2.flags(fn) or
1170 1170 (mf1[fn] != mf2[fn] and
1171 1171 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1172 1172 modified.append(fn)
1173 1173 elif listclean:
1174 1174 clean.append(fn)
1175 1175 del mf1[fn]
1176 1176 else:
1177 1177 added.append(fn)
1178 1178 removed = mf1.keys()
1179 1179
1180 1180 r = modified, added, removed, deleted, unknown, ignored, clean
1181 1181
1182 1182 if listsubrepos:
1183 1183 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1184 1184 if working:
1185 1185 rev2 = None
1186 1186 else:
1187 1187 rev2 = ctx2.substate[subpath][1]
1188 1188 try:
1189 1189 submatch = matchmod.narrowmatcher(subpath, match)
1190 1190 s = sub.status(rev2, match=submatch, ignored=listignored,
1191 1191 clean=listclean, unknown=listunknown,
1192 1192 listsubrepos=True)
1193 1193 for rfiles, sfiles in zip(r, s):
1194 1194 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1195 1195 except error.LookupError:
1196 1196 self.ui.status(_("skipping missing subrepository: %s\n")
1197 1197 % subpath)
1198 1198
1199 1199 [l.sort() for l in r]
1200 1200 return r
1201 1201
1202 1202 def heads(self, start=None):
1203 1203 heads = self.changelog.heads(start)
1204 1204 # sort the output in rev descending order
1205 1205 heads = [(-self.changelog.rev(h), h) for h in heads]
1206 1206 return [n for (r, n) in sorted(heads)]
1207 1207
1208 1208 def branchheads(self, branch=None, start=None, closed=False):
1209 1209 '''return a (possibly filtered) list of heads for the given branch
1210 1210
1211 1211 Heads are returned in topological order, from newest to oldest.
1212 1212 If branch is None, use the dirstate branch.
1213 1213 If start is not None, return only heads reachable from start.
1214 1214 If closed is True, return heads that are marked as closed as well.
1215 1215 '''
1216 1216 if branch is None:
1217 1217 branch = self[None].branch()
1218 1218 branches = self.branchmap()
1219 1219 if branch not in branches:
1220 1220 return []
1221 1221 # the cache returns heads ordered lowest to highest
1222 1222 bheads = list(reversed(branches[branch]))
1223 1223 if start is not None:
1224 1224 # filter out the heads that cannot be reached from startrev
1225 1225 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1226 1226 bheads = [h for h in bheads if h in fbheads]
1227 1227 if not closed:
1228 1228 bheads = [h for h in bheads if
1229 1229 ('close' not in self.changelog.read(h)[5])]
1230 1230 return bheads
1231 1231
1232 1232 def branches(self, nodes):
1233 1233 if not nodes:
1234 1234 nodes = [self.changelog.tip()]
1235 1235 b = []
1236 1236 for n in nodes:
1237 1237 t = n
1238 1238 while 1:
1239 1239 p = self.changelog.parents(n)
1240 1240 if p[1] != nullid or p[0] == nullid:
1241 1241 b.append((t, n, p[0], p[1]))
1242 1242 break
1243 1243 n = p[0]
1244 1244 return b
1245 1245
1246 1246 def between(self, pairs):
1247 1247 r = []
1248 1248
1249 1249 for top, bottom in pairs:
1250 1250 n, l, i = top, [], 0
1251 1251 f = 1
1252 1252
1253 1253 while n != bottom and n != nullid:
1254 1254 p = self.changelog.parents(n)[0]
1255 1255 if i == f:
1256 1256 l.append(n)
1257 1257 f = f * 2
1258 1258 n = p
1259 1259 i += 1
1260 1260
1261 1261 r.append(l)
1262 1262
1263 1263 return r
1264 1264
1265 1265 def pull(self, remote, heads=None, force=False):
1266 1266 lock = self.lock()
1267 1267 try:
1268 1268 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1269 1269 force=force)
1270 1270 common, fetch, rheads = tmp
1271 1271 if not fetch:
1272 1272 self.ui.status(_("no changes found\n"))
1273 1273 return 0
1274 1274
1275 1275 if heads is None and fetch == [nullid]:
1276 1276 self.ui.status(_("requesting all changes\n"))
1277 1277 elif heads is None and remote.capable('changegroupsubset'):
1278 1278 # issue1320, avoid a race if remote changed after discovery
1279 1279 heads = rheads
1280 1280
1281 1281 if heads is None:
1282 1282 cg = remote.changegroup(fetch, 'pull')
1283 1283 else:
1284 1284 if not remote.capable('changegroupsubset'):
1285 1285 raise util.Abort(_("partial pull cannot be done because "
1286 1286 "other repository doesn't support "
1287 1287 "changegroupsubset."))
1288 1288 cg = remote.changegroupsubset(fetch, heads, 'pull')
1289 1289 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1290 1290 finally:
1291 1291 lock.release()
1292 1292
1293 1293 def push(self, remote, force=False, revs=None, newbranch=False):
1294 1294 '''Push outgoing changesets (limited by revs) from the current
1295 1295 repository to remote. Return an integer:
1296 1296 - 0 means HTTP error *or* nothing to push
1297 1297 - 1 means we pushed and remote head count is unchanged *or*
1298 1298 we have outgoing changesets but refused to push
1299 1299 - other values as described by addchangegroup()
1300 1300 '''
1301 1301 # there are two ways to push to remote repo:
1302 1302 #
1303 1303 # addchangegroup assumes local user can lock remote
1304 1304 # repo (local filesystem, old ssh servers).
1305 1305 #
1306 1306 # unbundle assumes local user cannot lock remote repo (new ssh
1307 1307 # servers, http servers).
1308 1308
1309 1309 lock = None
1310 1310 unbundle = remote.capable('unbundle')
1311 1311 if not unbundle:
1312 1312 lock = remote.lock()
1313 1313 try:
1314 1314 ret = discovery.prepush(self, remote, force, revs, newbranch)
1315 1315 if ret[0] is None:
1316 1316 # and here we return 0 for "nothing to push" or 1 for
1317 1317 # "something to push but I refuse"
1318 1318 return ret[1]
1319 1319
1320 1320 cg, remote_heads = ret
1321 1321 if unbundle:
1322 1322 # local repo finds heads on server, finds out what revs it must
1323 1323 # push. once revs transferred, if server finds it has
1324 1324 # different heads (someone else won commit/push race), server
1325 1325 # aborts.
1326 1326 if force:
1327 1327 remote_heads = ['force']
1328 1328 # ssh: return remote's addchangegroup()
1329 1329 # http: return remote's addchangegroup() or 0 for error
1330 1330 return remote.unbundle(cg, remote_heads, 'push')
1331 1331 else:
1332 1332 # we return an integer indicating remote head count change
1333 1333 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1334 1334 finally:
1335 1335 if lock is not None:
1336 1336 lock.release()
1337 1337
1338 1338 def changegroupinfo(self, nodes, source):
1339 1339 if self.ui.verbose or source == 'bundle':
1340 1340 self.ui.status(_("%d changesets found\n") % len(nodes))
1341 1341 if self.ui.debugflag:
1342 1342 self.ui.debug("list of changesets:\n")
1343 1343 for node in nodes:
1344 1344 self.ui.debug("%s\n" % hex(node))
1345 1345
1346 1346 def changegroupsubset(self, bases, heads, source, extranodes=None):
1347 1347 """Compute a changegroup consisting of all the nodes that are
1348 1348 descendants of any of the bases and ancestors of any of the heads.
1349 1349 Return a chunkbuffer object whose read() method will return
1350 1350 successive changegroup chunks.
1351 1351
1352 1352 It is fairly complex as determining which filenodes and which
1353 1353 manifest nodes need to be included for the changeset to be complete
1354 1354 is non-trivial.
1355 1355
1356 1356 Another wrinkle is doing the reverse, figuring out which changeset in
1357 1357 the changegroup a particular filenode or manifestnode belongs to.
1358 1358
1359 1359 The caller can specify some nodes that must be included in the
1360 1360 changegroup using the extranodes argument. It should be a dict
1361 1361 where the keys are the filenames (or 1 for the manifest), and the
1362 1362 values are lists of (node, linknode) tuples, where node is a wanted
1363 1363 node and linknode is the changelog node that should be transmitted as
1364 1364 the linkrev.
1365 1365 """
1366 1366
1367 1367 # Set up some initial variables
1368 1368 # Make it easy to refer to self.changelog
1369 1369 cl = self.changelog
1370 1370 # Compute the list of changesets in this changegroup.
1371 1371 # Some bases may turn out to be superfluous, and some heads may be
1372 1372 # too. nodesbetween will return the minimal set of bases and heads
1373 1373 # necessary to re-create the changegroup.
1374 1374 if not bases:
1375 1375 bases = [nullid]
1376 1376 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1377 1377
1378 1378 if extranodes is None:
1379 1379 # can we go through the fast path ?
1380 1380 heads.sort()
1381 1381 allheads = self.heads()
1382 1382 allheads.sort()
1383 1383 if heads == allheads:
1384 1384 return self._changegroup(msng_cl_lst, source)
1385 1385
1386 1386 # slow path
1387 1387 self.hook('preoutgoing', throw=True, source=source)
1388 1388
1389 1389 self.changegroupinfo(msng_cl_lst, source)
1390 1390
1391 1391 # We assume that all ancestors of bases are known
1392 1392 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1393 1393
1394 1394 # Make it easy to refer to self.manifest
1395 1395 mnfst = self.manifest
1396 1396 # We don't know which manifests are missing yet
1397 1397 msng_mnfst_set = {}
1398 1398 # Nor do we know which filenodes are missing.
1399 1399 msng_filenode_set = {}
1400 1400
1401 1401 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1402 1402 junk = None
1403 1403
1404 1404 # A changeset always belongs to itself, so the changenode lookup
1405 1405 # function for a changenode is identity.
1406 1406 def identity(x):
1407 1407 return x
1408 1408
1409 1409 # A function generating function that sets up the initial environment
1410 1410 # for the inner function.
1411 1411 def filenode_collector(changedfiles):
1412 1412 # This gathers information from each manifestnode included in the
1413 1413 # changegroup about which filenodes the manifest node references
1414 1414 # so we can include those in the changegroup too.
1415 1415 #
1416 1416 # It also remembers which changenode each filenode belongs to. It
1417 1417 # does this by assuming a filenode belongs to the changenode
1418 1418 # that the first manifest referencing it belongs to.
1419 1419 def collect_msng_filenodes(mnfstnode):
1420 1420 r = mnfst.rev(mnfstnode)
1421 1421 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1422 1422 # If the previous rev is one of the parents,
1423 1423 # we only need to see a diff.
1424 1424 deltamf = mnfst.readdelta(mnfstnode)
1425 1425 # For each line in the delta
1426 1426 for f, fnode in deltamf.iteritems():
1427 1427 # And if the file is in the list of files we care
1428 1428 # about.
1429 1429 if f in changedfiles:
1430 1430 # Get the changenode this manifest belongs to
1431 1431 clnode = msng_mnfst_set[mnfstnode]
1432 1432 # Create the set of filenodes for the file if
1433 1433 # there isn't one already.
1434 1434 ndset = msng_filenode_set.setdefault(f, {})
1435 1435 # And set the filenode's changelog node to the
1436 1436 # manifest's if it hasn't been set already.
1437 1437 ndset.setdefault(fnode, clnode)
1438 1438 else:
1439 1439 # Otherwise we need a full manifest.
1440 1440 m = mnfst.read(mnfstnode)
1441 1441 # For every file we care about.
1442 1442 for f in changedfiles:
1443 1443 fnode = m.get(f, None)
1444 1444 # If it's in the manifest
1445 1445 if fnode is not None:
1446 1446 # See comments above.
1447 1447 clnode = msng_mnfst_set[mnfstnode]
1448 1448 ndset = msng_filenode_set.setdefault(f, {})
1449 1449 ndset.setdefault(fnode, clnode)
1450 1450 return collect_msng_filenodes
1451 1451
1452 1452 # If we determine that a particular file or manifest node must be a
1453 1453 # node that the recipient of the changegroup will already have, we can
1454 1454 # also assume the recipient will have all the parents. This function
1455 1455 # prunes them from the set of missing nodes.
1456 1456 def prune(revlog, missingnodes):
1457 1457 hasset = set()
1458 1458 # If a 'missing' filenode thinks it belongs to a changenode we
1459 1459 # assume the recipient must have, then the recipient must have
1460 1460 # that filenode.
1461 1461 for n in missingnodes:
1462 1462 clrev = revlog.linkrev(revlog.rev(n))
1463 1463 if clrev in commonrevs:
1464 1464 hasset.add(n)
1465 1465 for n in hasset:
1466 1466 missingnodes.pop(n, None)
1467 1467 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1468 1468 missingnodes.pop(revlog.node(r), None)
1469 1469
1470 1470 # Add the nodes that were explicitly requested.
1471 1471 def add_extra_nodes(name, nodes):
1472 1472 if not extranodes or name not in extranodes:
1473 1473 return
1474 1474
1475 1475 for node, linknode in extranodes[name]:
1476 1476 if node not in nodes:
1477 1477 nodes[node] = linknode
1478 1478
1479 1479 # Now that we have all these utility functions to help out and
1480 1480 # logically divide up the task, generate the group.
1481 1481 def gengroup():
1482 1482 # The set of changed files starts empty.
1483 1483 changedfiles = set()
1484 1484 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1485 1485
1486 1486 # Create a changenode group generator that will call our functions
1487 1487 # back to lookup the owning changenode and collect information.
1488 1488 group = cl.group(msng_cl_lst, identity, collect)
1489 1489 for cnt, chnk in enumerate(group):
1490 1490 yield chnk
1491 1491 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1492 1492 self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

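        # 'UN' marks the chunk stream as uncompressed; the unbundle10 object
        # wraps it so callers can read() successive changegroup chunks.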
        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

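        # A revlog revision's linkrev is the changelog revision that
        # introduced it; cl.node() turns that back into the changeset id
        # the receiving side should associate with each delta.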
        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
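            # len(cl) is the revision number the incoming changeset is about
            # to receive: revisions are appended to the changelog in order.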
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr
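            # The unbundler invokes source.callback once per chunk it reads,
            # so the progress bar ticks as the changeset and manifest groups
            # stream in (it is detached again before the per-file phase).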

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)
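            # needfiles maps filename -> set of filenodes the incoming file
            # groups are required to deliver; whatever is left in it after
            # the files are processed indicates an incomplete bundle.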
1717 1717
1718 1718 # process the files
1719 1719 self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
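                # cl.writepending() flushes the delayed changelog writes to a
                # temporary pending file; returning self.root lets the
                # pretxnchangegroup hooks see the not-yet-committed changesets
                # (exposed to them via HG_PENDING).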
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
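        # (e.g. 1 head before and 3 after -> 3 = 1 + 2 added heads;
        #  2 heads before and 1 after -> -2 = -1 - 1 removed head;
        #  unchanged head count -> 1)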
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote, requirements):
        fp = remote.stream_out()
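        # stream_out wire format: a status line (0 = ok, 1 = operation
        # forbidden, 2 = could not lock), then '<filecount> <bytecount>',
        # then for each file a '<name>\0<size>' line followed by exactly
        # <size> bytes of raw store data.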
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
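        # Mirror addchangegroup's "never return 0 on success" convention:
        # report the resulting head count, offset by one.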
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)
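    # (Both methods delegate to the pushkey module, which dispatches on the
    # key namespace passed in.)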

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True