localrepo: factor out requirement application and write
Sune Foldager
r12295:3388ab21 default
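This change factors repository requirements handling out of __init__ into two
reusable helpers: _applyrequirements(), which records the requirement set and
derives store options (such as parentdelta) from it, and _writerequirements(),
which persists one requirement per line to .hg/requires. Opening an existing
repository and creating a new one now share a single code path, and later
format changes can rewrite the requires file through the same helpers.

A minimal standalone sketch of the pattern, assuming a plain filesystem path
in place of Mercurial's opener; the Repo class and its attribute names here
are hypothetical stand-ins, not Mercurial's API:

import os

class Repo(object):
    def __init__(self, path, requirements):
        self.path = path
        self._applyrequirements(requirements)
        self._writerequirements()

    def _applyrequirements(self, requirements):
        # remember the requirement set and derive store options from it
        self.requirements = requirements
        self.sopener_options = {}
        if 'parentdelta' in requirements:
            self.sopener_options['parentdelta'] = 1

    def _writerequirements(self):
        # persist one requirement per line, like .hg/requires
        reqfile = open(os.path.join(self.path, 'requires'), 'w')
        try:
            for r in self.requirements:
                reqfile.write('%s\n' % r)
        finally:
            reqfile.close()

# e.g. Repo('.', set(['revlogv1', 'store', 'fncache'])) writes ./requires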
@@ -1,1863 +1,1872 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared'))
25 26
26 27 def __init__(self, baseui, path=None, create=0):
27 28 repo.repository.__init__(self)
28 29 self.root = os.path.realpath(util.expandpath(path))
29 30 self.path = os.path.join(self.root, ".hg")
30 31 self.origroot = path
31 32 self.auditor = util.path_auditor(self.root, self._checknested)
32 33 self.opener = util.opener(self.path)
33 34 self.wopener = util.opener(self.root)
34 35 self.baseui = baseui
35 36 self.ui = baseui.copy()
36 37
37 38 try:
38 39 self.ui.readconfig(self.join("hgrc"), self.root)
39 40 extensions.loadall(self.ui)
40 41 except IOError:
41 42 pass
42 43
43 44 if not os.path.isdir(self.path):
44 45 if create:
45 46 if not os.path.exists(path):
46 47 util.makedirs(path)
47 48 os.mkdir(self.path)
48 49 requirements = ["revlogv1"]
49 50 if self.ui.configbool('format', 'usestore', True):
50 51 os.mkdir(os.path.join(self.path, "store"))
51 52 requirements.append("store")
52 53 if self.ui.configbool('format', 'usefncache', True):
53 54 requirements.append("fncache")
54 55 # create an invalid changelog
55 56 self.opener("00changelog.i", "a").write(
56 57 '\0\0\0\2' # represents revlogv2
57 58 ' dummy changelog to prevent using the old repo layout'
58 59 )
59 60 if self.ui.configbool('format', 'parentdelta', False):
60 61 requirements.append("parentdelta")
61 reqfile = self.opener("requires", "w")
62 for r in requirements:
63 reqfile.write("%s\n" % r)
64 reqfile.close()
65 62 else:
66 63 raise error.RepoError(_("repository %s not found") % path)
67 64 elif create:
68 65 raise error.RepoError(_("repository %s already exists") % path)
69 66 else:
70 67 # find requirements
71 68 requirements = set()
72 69 try:
73 70 requirements = set(self.opener("requires").read().splitlines())
74 71 except IOError, inst:
75 72 if inst.errno != errno.ENOENT:
76 73 raise
77 74 for r in requirements - self.supported:
78 75 raise error.RepoError(_("requirement '%s' not supported") % r)
79 76
80 77 self.sharedpath = self.path
81 78 try:
82 79 s = os.path.realpath(self.opener("sharedpath").read())
83 80 if not os.path.exists(s):
84 81 raise error.RepoError(
85 82 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 83 self.sharedpath = s
87 84 except IOError, inst:
88 85 if inst.errno != errno.ENOENT:
89 86 raise
90 87
91 88 self.store = store.store(requirements, self.sharedpath, util.opener)
92 89 self.spath = self.store.path
93 90 self.sopener = self.store.opener
94 91 self.sjoin = self.store.join
95 92 self.opener.createmode = self.store.createmode
96 self.sopener.options = {}
97 if 'parentdelta' in requirements:
98 self.sopener.options['parentdelta'] = 1
93 self._applyrequirements(requirements)
94 if create:
95 self._writerequirements()
99 96
100 97 # These two define the set of tags for this repository. _tags
101 98 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 99 # 'local'. (Global tags are defined by .hgtags across all
103 100 # heads, and local tags are defined in .hg/localtags.) They
104 101 # constitute the in-memory cache of tags.
105 102 self._tags = None
106 103 self._tagtypes = None
107 104
108 105 self._branchcache = None # in UTF-8
109 106 self._branchcachetip = None
110 107 self.nodetagscache = None
111 108 self.filterpats = {}
112 109 self._datafilters = {}
113 110 self._transref = self._lockref = self._wlockref = None
114 111
112 def _applyrequirements(self, requirements):
113 self.requirements = requirements
114 self.sopener.options = {}
115 if 'parentdelta' in requirements:
116 self.sopener.options['parentdelta'] = 1
117
118 def _writerequirements(self):
119 reqfile = self.opener("requires", "w")
120 for r in self.requirements:
121 reqfile.write("%s\n" % r)
122 reqfile.close()
123
115 124 def _checknested(self, path):
116 125 """Determine if path is a legal nested repository."""
117 126 if not path.startswith(self.root):
118 127 return False
119 128 subpath = path[len(self.root) + 1:]
120 129
121 130 # XXX: Checking against the current working copy is wrong in
122 131 # the sense that it can reject things like
123 132 #
124 133 # $ hg cat -r 10 sub/x.txt
125 134 #
126 135 # if sub/ is no longer a subrepository in the working copy
127 136 # parent revision.
128 137 #
129 138 # However, it can of course also allow things that would have
130 139 # been rejected before, such as the above cat command if sub/
131 140 # is a subrepository now, but was a normal directory before.
132 141 # The old path auditor would have rejected by mistake since it
133 142 # panics when it sees sub/.hg/.
134 143 #
135 144 # All in all, checking against the working copy seems sensible
136 145 # since we want to prevent access to nested repositories on
137 146 # the filesystem *now*.
138 147 ctx = self[None]
139 148 parts = util.splitpath(subpath)
140 149 while parts:
141 150 prefix = os.sep.join(parts)
142 151 if prefix in ctx.substate:
143 152 if prefix == subpath:
144 153 return True
145 154 else:
146 155 sub = ctx.sub(prefix)
147 156 return sub.checknested(subpath[len(prefix) + 1:])
148 157 else:
149 158 parts.pop()
150 159 return False
151 160
152 161
153 162 @propertycache
154 163 def changelog(self):
155 164 c = changelog.changelog(self.sopener)
156 165 if 'HG_PENDING' in os.environ:
157 166 p = os.environ['HG_PENDING']
158 167 if p.startswith(self.root):
159 168 c.readpending('00changelog.i.a')
160 169 self.sopener.options['defversion'] = c.version
161 170 return c
162 171
163 172 @propertycache
164 173 def manifest(self):
165 174 return manifest.manifest(self.sopener)
166 175
167 176 @propertycache
168 177 def dirstate(self):
169 178 return dirstate.dirstate(self.opener, self.ui, self.root)
170 179
171 180 def __getitem__(self, changeid):
172 181 if changeid is None:
173 182 return context.workingctx(self)
174 183 return context.changectx(self, changeid)
175 184
176 185 def __contains__(self, changeid):
177 186 try:
178 187 return bool(self.lookup(changeid))
179 188 except error.RepoLookupError:
180 189 return False
181 190
182 191 def __nonzero__(self):
183 192 return True
184 193
185 194 def __len__(self):
186 195 return len(self.changelog)
187 196
188 197 def __iter__(self):
189 198 for i in xrange(len(self)):
190 199 yield i
191 200
192 201 def url(self):
193 202 return 'file:' + self.root
194 203
195 204 def hook(self, name, throw=False, **args):
196 205 return hook.hook(self.ui, self, name, throw, **args)
197 206
198 207 tag_disallowed = ':\r\n'
199 208
200 209 def _tag(self, names, node, message, local, user, date, extra={}):
201 210 if isinstance(names, str):
202 211 allchars = names
203 212 names = (names,)
204 213 else:
205 214 allchars = ''.join(names)
206 215 for c in self.tag_disallowed:
207 216 if c in allchars:
208 217 raise util.Abort(_('%r cannot be used in a tag name') % c)
209 218
210 219 branches = self.branchmap()
211 220 for name in names:
212 221 self.hook('pretag', throw=True, node=hex(node), tag=name,
213 222 local=local)
214 223 if name in branches:
215 224 self.ui.warn(_("warning: tag %s conflicts with existing"
216 225 " branch name\n") % name)
217 226
218 227 def writetags(fp, names, munge, prevtags):
219 228 fp.seek(0, 2)
220 229 if prevtags and prevtags[-1] != '\n':
221 230 fp.write('\n')
222 231 for name in names:
223 232 m = munge and munge(name) or name
224 233 if self._tagtypes and name in self._tagtypes:
225 234 old = self._tags.get(name, nullid)
226 235 fp.write('%s %s\n' % (hex(old), m))
227 236 fp.write('%s %s\n' % (hex(node), m))
228 237 fp.close()
229 238
230 239 prevtags = ''
231 240 if local:
232 241 try:
233 242 fp = self.opener('localtags', 'r+')
234 243 except IOError:
235 244 fp = self.opener('localtags', 'a')
236 245 else:
237 246 prevtags = fp.read()
238 247
239 248 # local tags are stored in the current charset
240 249 writetags(fp, names, None, prevtags)
241 250 for name in names:
242 251 self.hook('tag', node=hex(node), tag=name, local=local)
243 252 return
244 253
245 254 try:
246 255 fp = self.wfile('.hgtags', 'rb+')
247 256 except IOError:
248 257 fp = self.wfile('.hgtags', 'ab')
249 258 else:
250 259 prevtags = fp.read()
251 260
252 261 # committed tags are stored in UTF-8
253 262 writetags(fp, names, encoding.fromlocal, prevtags)
254 263
255 264 if '.hgtags' not in self.dirstate:
256 265 self[None].add(['.hgtags'])
257 266
258 267 m = matchmod.exact(self.root, '', ['.hgtags'])
259 268 tagnode = self.commit(message, user, date, extra=extra, match=m)
260 269
261 270 for name in names:
262 271 self.hook('tag', node=hex(node), tag=name, local=local)
263 272
264 273 return tagnode
265 274
266 275 def tag(self, names, node, message, local, user, date):
267 276 '''tag a revision with one or more symbolic names.
268 277
269 278 names is a list of strings or, when adding a single tag, names may be a
270 279 string.
271 280
272 281 if local is True, the tags are stored in a per-repository file.
273 282 otherwise, they are stored in the .hgtags file, and a new
274 283 changeset is committed with the change.
275 284
276 285 keyword arguments:
277 286
278 287 local: whether to store tags in non-version-controlled file
279 288 (default False)
280 289
281 290 message: commit message to use if committing
282 291
283 292 user: name of user to use if committing
284 293
285 294 date: date tuple to use if committing'''
286 295
287 296 for x in self.status()[:5]:
288 297 if '.hgtags' in x:
289 298 raise util.Abort(_('working copy of .hgtags is changed '
290 299 '(please commit .hgtags manually)'))
291 300
292 301 self.tags() # instantiate the cache
293 302 self._tag(names, node, message, local, user, date)
294 303
295 304 def tags(self):
296 305 '''return a mapping of tag to node'''
297 306 if self._tags is None:
298 307 (self._tags, self._tagtypes) = self._findtags()
299 308
300 309 return self._tags
301 310
302 311 def _findtags(self):
303 312 '''Do the hard work of finding tags. Return a pair of dicts
304 313 (tags, tagtypes) where tags maps tag name to node, and tagtypes
305 314 maps tag name to a string like \'global\' or \'local\'.
306 315 Subclasses or extensions are free to add their own tags, but
307 316 should be aware that the returned dicts will be retained for the
308 317 duration of the localrepo object.'''
309 318
310 319 # XXX what tagtype should subclasses/extensions use? Currently
311 320 # mq and bookmarks add tags, but do not set the tagtype at all.
312 321 # Should each extension invent its own tag type? Should there
313 322 # be one tagtype for all such "virtual" tags? Or is the status
314 323 # quo fine?
315 324
316 325 alltags = {} # map tag name to (node, hist)
317 326 tagtypes = {}
318 327
319 328 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
320 329 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
321 330
322 331 # Build the return dicts. Have to re-encode tag names because
323 332 # the tags module always uses UTF-8 (in order not to lose info
324 333 # writing to the cache), but the rest of Mercurial wants them in
325 334 # local encoding.
326 335 tags = {}
327 336 for (name, (node, hist)) in alltags.iteritems():
328 337 if node != nullid:
329 338 tags[encoding.tolocal(name)] = node
330 339 tags['tip'] = self.changelog.tip()
331 340 tagtypes = dict([(encoding.tolocal(name), value)
332 341 for (name, value) in tagtypes.iteritems()])
333 342 return (tags, tagtypes)
334 343
335 344 def tagtype(self, tagname):
336 345 '''
337 346 return the type of the given tag. result can be:
338 347
339 348 'local' : a local tag
340 349 'global' : a global tag
341 350 None : tag does not exist
342 351 '''
343 352
344 353 self.tags()
345 354
346 355 return self._tagtypes.get(tagname)
347 356
348 357 def tagslist(self):
349 358 '''return a list of tags ordered by revision'''
350 359 l = []
351 360 for t, n in self.tags().iteritems():
352 361 try:
353 362 r = self.changelog.rev(n)
354 363 except:
355 364 r = -2 # sort to the beginning of the list if unknown
356 365 l.append((r, t, n))
357 366 return [(t, n) for r, t, n in sorted(l)]
358 367
359 368 def nodetags(self, node):
360 369 '''return the tags associated with a node'''
361 370 if not self.nodetagscache:
362 371 self.nodetagscache = {}
363 372 for t, n in self.tags().iteritems():
364 373 self.nodetagscache.setdefault(n, []).append(t)
365 374 for tags in self.nodetagscache.itervalues():
366 375 tags.sort()
367 376 return self.nodetagscache.get(node, [])
368 377
369 378 def _branchtags(self, partial, lrev):
370 379 # TODO: rename this function?
371 380 tiprev = len(self) - 1
372 381 if lrev != tiprev:
373 382 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
374 383 self._updatebranchcache(partial, ctxgen)
375 384 self._writebranchcache(partial, self.changelog.tip(), tiprev)
376 385
377 386 return partial
378 387
379 388 def updatebranchcache(self):
380 389 tip = self.changelog.tip()
381 390 if self._branchcache is not None and self._branchcachetip == tip:
382 391 return self._branchcache
383 392
384 393 oldtip = self._branchcachetip
385 394 self._branchcachetip = tip
386 395 if oldtip is None or oldtip not in self.changelog.nodemap:
387 396 partial, last, lrev = self._readbranchcache()
388 397 else:
389 398 lrev = self.changelog.rev(oldtip)
390 399 partial = self._branchcache
391 400
392 401 self._branchtags(partial, lrev)
393 402 # this private cache holds all heads (not just tips)
394 403 self._branchcache = partial
395 404
396 405 def branchmap(self):
397 406 '''returns a dictionary {branch: [branchheads]}'''
398 407 self.updatebranchcache()
399 408 return self._branchcache
400 409
401 410 def branchtags(self):
402 411 '''return a dict where branch names map to the tipmost head of
403 412 the branch, open heads come before closed'''
404 413 bt = {}
405 414 for bn, heads in self.branchmap().iteritems():
406 415 tip = heads[-1]
407 416 for h in reversed(heads):
408 417 if 'close' not in self.changelog.read(h)[5]:
409 418 tip = h
410 419 break
411 420 bt[bn] = tip
412 421 return bt
413 422
414 423
415 424 def _readbranchcache(self):
416 425 partial = {}
417 426 try:
418 427 f = self.opener("branchheads.cache")
419 428 lines = f.read().split('\n')
420 429 f.close()
421 430 except (IOError, OSError):
422 431 return {}, nullid, nullrev
423 432
424 433 try:
425 434 last, lrev = lines.pop(0).split(" ", 1)
426 435 last, lrev = bin(last), int(lrev)
427 436 if lrev >= len(self) or self[lrev].node() != last:
428 437 # invalidate the cache
429 438 raise ValueError('invalidating branch cache (tip differs)')
430 439 for l in lines:
431 440 if not l:
432 441 continue
433 442 node, label = l.split(" ", 1)
434 443 partial.setdefault(label.strip(), []).append(bin(node))
435 444 except KeyboardInterrupt:
436 445 raise
437 446 except Exception, inst:
438 447 if self.ui.debugflag:
439 448 self.ui.warn(str(inst), '\n')
440 449 partial, last, lrev = {}, nullid, nullrev
441 450 return partial, last, lrev
442 451
443 452 def _writebranchcache(self, branches, tip, tiprev):
444 453 try:
445 454 f = self.opener("branchheads.cache", "w", atomictemp=True)
446 455 f.write("%s %s\n" % (hex(tip), tiprev))
447 456 for label, nodes in branches.iteritems():
448 457 for node in nodes:
449 458 f.write("%s %s\n" % (hex(node), label))
450 459 f.rename()
451 460 except (IOError, OSError):
452 461 pass
453 462
454 463 def _updatebranchcache(self, partial, ctxgen):
455 464 # collect new branch entries
456 465 newbranches = {}
457 466 for c in ctxgen:
458 467 newbranches.setdefault(c.branch(), []).append(c.node())
459 468 # if older branchheads are reachable from new ones, they aren't
460 469 # really branchheads. Note checking parents is insufficient:
461 470 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
462 471 for branch, newnodes in newbranches.iteritems():
463 472 bheads = partial.setdefault(branch, [])
464 473 bheads.extend(newnodes)
465 474 if len(bheads) <= 1:
466 475 continue
467 476 # starting from tip means fewer passes over reachable
468 477 while newnodes:
469 478 latest = newnodes.pop()
470 479 if latest not in bheads:
471 480 continue
472 481 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
473 482 reachable = self.changelog.reachable(latest, minbhrev)
474 483 reachable.remove(latest)
475 484 bheads = [b for b in bheads if b not in reachable]
476 485 partial[branch] = bheads
477 486
478 487 def lookup(self, key):
479 488 if isinstance(key, int):
480 489 return self.changelog.node(key)
481 490 elif key == '.':
482 491 return self.dirstate.parents()[0]
483 492 elif key == 'null':
484 493 return nullid
485 494 elif key == 'tip':
486 495 return self.changelog.tip()
487 496 n = self.changelog._match(key)
488 497 if n:
489 498 return n
490 499 if key in self.tags():
491 500 return self.tags()[key]
492 501 if key in self.branchtags():
493 502 return self.branchtags()[key]
494 503 n = self.changelog._partialmatch(key)
495 504 if n:
496 505 return n
497 506
498 507 # can't find key, check if it might have come from damaged dirstate
499 508 if key in self.dirstate.parents():
500 509 raise error.Abort(_("working directory has unknown parent '%s'!")
501 510 % short(key))
502 511 try:
503 512 if len(key) == 20:
504 513 key = hex(key)
505 514 except:
506 515 pass
507 516 raise error.RepoLookupError(_("unknown revision '%s'") % key)
508 517
509 518 def lookupbranch(self, key, remote=None):
510 519 repo = remote or self
511 520 if key in repo.branchmap():
512 521 return key
513 522
514 523 repo = (remote and remote.local()) and remote or self
515 524 return repo[key].branch()
516 525
517 526 def local(self):
518 527 return True
519 528
520 529 def join(self, f):
521 530 return os.path.join(self.path, f)
522 531
523 532 def wjoin(self, f):
524 533 return os.path.join(self.root, f)
525 534
526 535 def file(self, f):
527 536 if f[0] == '/':
528 537 f = f[1:]
529 538 return filelog.filelog(self.sopener, f)
530 539
531 540 def changectx(self, changeid):
532 541 return self[changeid]
533 542
534 543 def parents(self, changeid=None):
535 544 '''get list of changectxs for parents of changeid'''
536 545 return self[changeid].parents()
537 546
538 547 def filectx(self, path, changeid=None, fileid=None):
539 548 """changeid can be a changeset revision, node, or tag.
540 549 fileid can be a file revision or node."""
541 550 return context.filectx(self, path, changeid, fileid)
542 551
543 552 def getcwd(self):
544 553 return self.dirstate.getcwd()
545 554
546 555 def pathto(self, f, cwd=None):
547 556 return self.dirstate.pathto(f, cwd)
548 557
549 558 def wfile(self, f, mode='r'):
550 559 return self.wopener(f, mode)
551 560
552 561 def _link(self, f):
553 562 return os.path.islink(self.wjoin(f))
554 563
555 564 def _loadfilter(self, filter):
556 565 if filter not in self.filterpats:
557 566 l = []
558 567 for pat, cmd in self.ui.configitems(filter):
559 568 if cmd == '!':
560 569 continue
561 570 mf = matchmod.match(self.root, '', [pat])
562 571 fn = None
563 572 params = cmd
564 573 for name, filterfn in self._datafilters.iteritems():
565 574 if cmd.startswith(name):
566 575 fn = filterfn
567 576 params = cmd[len(name):].lstrip()
568 577 break
569 578 if not fn:
570 579 fn = lambda s, c, **kwargs: util.filter(s, c)
571 580 # Wrap old filters not supporting keyword arguments
572 581 if not inspect.getargspec(fn)[2]:
573 582 oldfn = fn
574 583 fn = lambda s, c, **kwargs: oldfn(s, c)
575 584 l.append((mf, fn, params))
576 585 self.filterpats[filter] = l
577 586
578 587 def _filter(self, filter, filename, data):
579 588 self._loadfilter(filter)
580 589
581 590 for mf, fn, cmd in self.filterpats[filter]:
582 591 if mf(filename):
583 592 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
584 593 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
585 594 break
586 595
587 596 return data
588 597
589 598 def adddatafilter(self, name, filter):
590 599 self._datafilters[name] = filter
591 600
592 601 def wread(self, filename):
593 602 if self._link(filename):
594 603 data = os.readlink(self.wjoin(filename))
595 604 else:
596 605 data = self.wopener(filename, 'r').read()
597 606 return self._filter("encode", filename, data)
598 607
599 608 def wwrite(self, filename, data, flags):
600 609 data = self._filter("decode", filename, data)
601 610 try:
602 611 os.unlink(self.wjoin(filename))
603 612 except OSError:
604 613 pass
605 614 if 'l' in flags:
606 615 self.wopener.symlink(data, filename)
607 616 else:
608 617 self.wopener(filename, 'w').write(data)
609 618 if 'x' in flags:
610 619 util.set_flags(self.wjoin(filename), False, True)
611 620
612 621 def wwritedata(self, filename, data):
613 622 return self._filter("decode", filename, data)
614 623
615 624 def transaction(self, desc):
616 625 tr = self._transref and self._transref() or None
617 626 if tr and tr.running():
618 627 return tr.nest()
619 628
620 629 # abort here if the journal already exists
621 630 if os.path.exists(self.sjoin("journal")):
622 631 raise error.RepoError(
623 632 _("abandoned transaction found - run hg recover"))
624 633
625 634 # save dirstate for rollback
626 635 try:
627 636 ds = self.opener("dirstate").read()
628 637 except IOError:
629 638 ds = ""
630 639 self.opener("journal.dirstate", "w").write(ds)
631 640 self.opener("journal.branch", "w").write(self.dirstate.branch())
632 641 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
633 642
634 643 renames = [(self.sjoin("journal"), self.sjoin("undo")),
635 644 (self.join("journal.dirstate"), self.join("undo.dirstate")),
636 645 (self.join("journal.branch"), self.join("undo.branch")),
637 646 (self.join("journal.desc"), self.join("undo.desc"))]
638 647 tr = transaction.transaction(self.ui.warn, self.sopener,
639 648 self.sjoin("journal"),
640 649 aftertrans(renames),
641 650 self.store.createmode)
642 651 self._transref = weakref.ref(tr)
643 652 return tr
644 653
645 654 def recover(self):
646 655 lock = self.lock()
647 656 try:
648 657 if os.path.exists(self.sjoin("journal")):
649 658 self.ui.status(_("rolling back interrupted transaction\n"))
650 659 transaction.rollback(self.sopener, self.sjoin("journal"),
651 660 self.ui.warn)
652 661 self.invalidate()
653 662 return True
654 663 else:
655 664 self.ui.warn(_("no interrupted transaction available\n"))
656 665 return False
657 666 finally:
658 667 lock.release()
659 668
660 669 def rollback(self, dryrun=False):
661 670 wlock = lock = None
662 671 try:
663 672 wlock = self.wlock()
664 673 lock = self.lock()
665 674 if os.path.exists(self.sjoin("undo")):
666 675 try:
667 676 args = self.opener("undo.desc", "r").read().splitlines()
668 677 if len(args) >= 3 and self.ui.verbose:
669 678 desc = _("rolling back to revision %s"
670 679 " (undo %s: %s)\n") % (
671 680 int(args[0]) - 1, args[1], args[2])
672 681 elif len(args) >= 2:
673 682 desc = _("rolling back to revision %s (undo %s)\n") % (
674 683 int(args[0]) - 1, args[1])
675 684 except IOError:
676 685 desc = _("rolling back unknown transaction\n")
677 686 self.ui.status(desc)
678 687 if dryrun:
679 688 return
680 689 transaction.rollback(self.sopener, self.sjoin("undo"),
681 690 self.ui.warn)
682 691 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
683 692 try:
684 693 branch = self.opener("undo.branch").read()
685 694 self.dirstate.setbranch(branch)
686 695 except IOError:
687 696 self.ui.warn(_("Named branch could not be reset, "
688 697 "current branch still is: %s\n")
689 698 % encoding.tolocal(self.dirstate.branch()))
690 699 self.invalidate()
691 700 self.dirstate.invalidate()
692 701 self.destroyed()
693 702 else:
694 703 self.ui.warn(_("no rollback information available\n"))
695 704 return 1
696 705 finally:
697 706 release(lock, wlock)
698 707
699 708 def invalidatecaches(self):
700 709 self._tags = None
701 710 self._tagtypes = None
702 711 self.nodetagscache = None
703 712 self._branchcache = None # in UTF-8
704 713 self._branchcachetip = None
705 714
706 715 def invalidate(self):
707 716 for a in "changelog manifest".split():
708 717 if a in self.__dict__:
709 718 delattr(self, a)
710 719 self.invalidatecaches()
711 720
712 721 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
713 722 try:
714 723 l = lock.lock(lockname, 0, releasefn, desc=desc)
715 724 except error.LockHeld, inst:
716 725 if not wait:
717 726 raise
718 727 self.ui.warn(_("waiting for lock on %s held by %r\n") %
719 728 (desc, inst.locker))
720 729 # default to 600 seconds timeout
721 730 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
722 731 releasefn, desc=desc)
723 732 if acquirefn:
724 733 acquirefn()
725 734 return l
726 735
727 736 def lock(self, wait=True):
728 737 '''Lock the repository store (.hg/store) and return a weak reference
729 738 to the lock. Use this before modifying the store (e.g. committing or
730 739 stripping). If you are opening a transaction, get a lock as well.'''
731 740 l = self._lockref and self._lockref()
732 741 if l is not None and l.held:
733 742 l.lock()
734 743 return l
735 744
736 745 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
737 746 _('repository %s') % self.origroot)
738 747 self._lockref = weakref.ref(l)
739 748 return l
740 749
741 750 def wlock(self, wait=True):
742 751 '''Lock the non-store parts of the repository (everything under
743 752 .hg except .hg/store) and return a weak reference to the lock.
744 753 Use this before modifying files in .hg.'''
745 754 l = self._wlockref and self._wlockref()
746 755 if l is not None and l.held:
747 756 l.lock()
748 757 return l
749 758
750 759 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
751 760 self.dirstate.invalidate, _('working directory of %s') %
752 761 self.origroot)
753 762 self._wlockref = weakref.ref(l)
754 763 return l
755 764
756 765 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
757 766 """
758 767 commit an individual file as part of a larger transaction
759 768 """
760 769
761 770 fname = fctx.path()
762 771 text = fctx.data()
763 772 flog = self.file(fname)
764 773 fparent1 = manifest1.get(fname, nullid)
765 774 fparent2 = fparent2o = manifest2.get(fname, nullid)
766 775
767 776 meta = {}
768 777 copy = fctx.renamed()
769 778 if copy and copy[0] != fname:
770 779 # Mark the new revision of this file as a copy of another
771 780 # file. This copy data will effectively act as a parent
772 781 # of this new revision. If this is a merge, the first
773 782 # parent will be the nullid (meaning "look up the copy data")
774 783 # and the second one will be the other parent. For example:
775 784 #
776 785 # 0 --- 1 --- 3 rev1 changes file foo
777 786 # \ / rev2 renames foo to bar and changes it
778 787 # \- 2 -/ rev3 should have bar with all changes and
779 788 # should record that bar descends from
780 789 # bar in rev2 and foo in rev1
781 790 #
782 791 # this allows this merge to succeed:
783 792 #
784 793 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
785 794 # \ / merging rev3 and rev4 should use bar@rev2
786 795 # \- 2 --- 4 as the merge base
787 796 #
788 797
789 798 cfname = copy[0]
790 799 crev = manifest1.get(cfname)
791 800 newfparent = fparent2
792 801
793 802 if manifest2: # branch merge
794 803 if fparent2 == nullid or crev is None: # copied on remote side
795 804 if cfname in manifest2:
796 805 crev = manifest2[cfname]
797 806 newfparent = fparent1
798 807
799 808 # find source in nearest ancestor if we've lost track
800 809 if not crev:
801 810 self.ui.debug(" %s: searching for copy revision for %s\n" %
802 811 (fname, cfname))
803 812 for ancestor in self['.'].ancestors():
804 813 if cfname in ancestor:
805 814 crev = ancestor[cfname].filenode()
806 815 break
807 816
808 817 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
809 818 meta["copy"] = cfname
810 819 meta["copyrev"] = hex(crev)
811 820 fparent1, fparent2 = nullid, newfparent
812 821 elif fparent2 != nullid:
813 822 # is one parent an ancestor of the other?
814 823 fparentancestor = flog.ancestor(fparent1, fparent2)
815 824 if fparentancestor == fparent1:
816 825 fparent1, fparent2 = fparent2, nullid
817 826 elif fparentancestor == fparent2:
818 827 fparent2 = nullid
819 828
820 829 # is the file changed?
821 830 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
822 831 changelist.append(fname)
823 832 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
824 833
825 834 # are just the flags changed during merge?
826 835 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
827 836 changelist.append(fname)
828 837
829 838 return fparent1
830 839
831 840 def commit(self, text="", user=None, date=None, match=None, force=False,
832 841 editor=False, extra={}):
833 842 """Add a new revision to current repository.
834 843
835 844 Revision information is gathered from the working directory,
836 845 match can be used to filter the committed files. If editor is
837 846 supplied, it is called to get a commit message.
838 847 """
839 848
840 849 def fail(f, msg):
841 850 raise util.Abort('%s: %s' % (f, msg))
842 851
843 852 if not match:
844 853 match = matchmod.always(self.root, '')
845 854
846 855 if not force:
847 856 vdirs = []
848 857 match.dir = vdirs.append
849 858 match.bad = fail
850 859
851 860 wlock = self.wlock()
852 861 try:
853 862 wctx = self[None]
854 863 merge = len(wctx.parents()) > 1
855 864
856 865 if (not force and merge and match and
857 866 (match.files() or match.anypats())):
858 867 raise util.Abort(_('cannot partially commit a merge '
859 868 '(do not specify files or patterns)'))
860 869
861 870 changes = self.status(match=match, clean=force)
862 871 if force:
863 872 changes[0].extend(changes[6]) # mq may commit unchanged files
864 873
865 874 # check subrepos
866 875 subs = []
867 876 removedsubs = set()
868 877 for p in wctx.parents():
869 878 removedsubs.update(s for s in p.substate if match(s))
870 879 for s in wctx.substate:
871 880 removedsubs.discard(s)
872 881 if match(s) and wctx.sub(s).dirty():
873 882 subs.append(s)
874 883 if (subs or removedsubs):
875 884 if (not match('.hgsub') and
876 885 '.hgsub' in (wctx.modified() + wctx.added())):
877 886 raise util.Abort(_("can't commit subrepos without .hgsub"))
878 887 if '.hgsubstate' not in changes[0]:
879 888 changes[0].insert(0, '.hgsubstate')
880 889
881 890 # make sure all explicit patterns are matched
882 891 if not force and match.files():
883 892 matched = set(changes[0] + changes[1] + changes[2])
884 893
885 894 for f in match.files():
886 895 if f == '.' or f in matched or f in wctx.substate:
887 896 continue
888 897 if f in changes[3]: # missing
889 898 fail(f, _('file not found!'))
890 899 if f in vdirs: # visited directory
891 900 d = f + '/'
892 901 for mf in matched:
893 902 if mf.startswith(d):
894 903 break
895 904 else:
896 905 fail(f, _("no match under directory!"))
897 906 elif f not in self.dirstate:
898 907 fail(f, _("file not tracked!"))
899 908
900 909 if (not force and not extra.get("close") and not merge
901 910 and not (changes[0] or changes[1] or changes[2])
902 911 and wctx.branch() == wctx.p1().branch()):
903 912 return None
904 913
905 914 ms = mergemod.mergestate(self)
906 915 for f in changes[0]:
907 916 if f in ms and ms[f] == 'u':
908 917 raise util.Abort(_("unresolved merge conflicts "
909 918 "(see hg resolve)"))
910 919
911 920 cctx = context.workingctx(self, text, user, date, extra, changes)
912 921 if editor:
913 922 cctx._text = editor(self, cctx, subs)
914 923 edited = (text != cctx._text)
915 924
916 925 # commit subs
917 926 if subs or removedsubs:
918 927 state = wctx.substate.copy()
919 928 for s in sorted(subs):
920 929 sub = wctx.sub(s)
921 930 self.ui.status(_('committing subrepository %s\n') %
922 931 subrepo.relpath(sub))
923 932 sr = sub.commit(cctx._text, user, date)
924 933 state[s] = (state[s][0], sr)
925 934 subrepo.writestate(self, state)
926 935
927 936 # Save commit message in case this transaction gets rolled back
928 937 # (e.g. by a pretxncommit hook). Leave the content alone on
929 938 # the assumption that the user will use the same editor again.
930 939 msgfile = self.opener('last-message.txt', 'wb')
931 940 msgfile.write(cctx._text)
932 941 msgfile.close()
933 942
934 943 p1, p2 = self.dirstate.parents()
935 944 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
936 945 try:
937 946 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
938 947 ret = self.commitctx(cctx, True)
939 948 except:
940 949 if edited:
941 950 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
942 951 self.ui.write(
943 952 _('note: commit message saved in %s\n') % msgfn)
944 953 raise
945 954
946 955 # update dirstate and mergestate
947 956 for f in changes[0] + changes[1]:
948 957 self.dirstate.normal(f)
949 958 for f in changes[2]:
950 959 self.dirstate.forget(f)
951 960 self.dirstate.setparents(ret)
952 961 ms.reset()
953 962 finally:
954 963 wlock.release()
955 964
956 965 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
957 966 return ret
958 967
959 968 def commitctx(self, ctx, error=False):
960 969 """Add a new revision to current repository.
961 970 Revision information is passed via the context argument.
962 971 """
963 972
964 973 tr = lock = None
965 974 removed = ctx.removed()
966 975 p1, p2 = ctx.p1(), ctx.p2()
967 976 m1 = p1.manifest().copy()
968 977 m2 = p2.manifest()
969 978 user = ctx.user()
970 979
971 980 lock = self.lock()
972 981 try:
973 982 tr = self.transaction("commit")
974 983 trp = weakref.proxy(tr)
975 984
976 985 # check in files
977 986 new = {}
978 987 changed = []
979 988 linkrev = len(self)
980 989 for f in sorted(ctx.modified() + ctx.added()):
981 990 self.ui.note(f + "\n")
982 991 try:
983 992 fctx = ctx[f]
984 993 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
985 994 changed)
986 995 m1.set(f, fctx.flags())
987 996 except OSError, inst:
988 997 self.ui.warn(_("trouble committing %s!\n") % f)
989 998 raise
990 999 except IOError, inst:
991 1000 errcode = getattr(inst, 'errno', errno.ENOENT)
992 1001 if error or errcode and errcode != errno.ENOENT:
993 1002 self.ui.warn(_("trouble committing %s!\n") % f)
994 1003 raise
995 1004 else:
996 1005 removed.append(f)
997 1006
998 1007 # update manifest
999 1008 m1.update(new)
1000 1009 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1001 1010 drop = [f for f in removed if f in m1]
1002 1011 for f in drop:
1003 1012 del m1[f]
1004 1013 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1005 1014 p2.manifestnode(), (new, drop))
1006 1015
1007 1016 # update changelog
1008 1017 self.changelog.delayupdate()
1009 1018 n = self.changelog.add(mn, changed + removed, ctx.description(),
1010 1019 trp, p1.node(), p2.node(),
1011 1020 user, ctx.date(), ctx.extra().copy())
1012 1021 p = lambda: self.changelog.writepending() and self.root or ""
1013 1022 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1014 1023 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1015 1024 parent2=xp2, pending=p)
1016 1025 self.changelog.finalize(trp)
1017 1026 tr.close()
1018 1027
1019 1028 if self._branchcache:
1020 1029 self.updatebranchcache()
1021 1030 return n
1022 1031 finally:
1023 1032 if tr:
1024 1033 tr.release()
1025 1034 lock.release()
1026 1035
1027 1036 def destroyed(self):
1028 1037 '''Inform the repository that nodes have been destroyed.
1029 1038 Intended for use by strip and rollback, so there's a common
1030 1039 place for anything that has to be done after destroying history.'''
1031 1040 # XXX it might be nice if we could take the list of destroyed
1032 1041 # nodes, but I don't see an easy way for rollback() to do that
1033 1042
1034 1043 # Ensure the persistent tag cache is updated. Doing it now
1035 1044 # means that the tag cache only has to worry about destroyed
1036 1045 # heads immediately after a strip/rollback. That in turn
1037 1046 # guarantees that "cachetip == currenttip" (comparing both rev
1038 1047 # and node) always means no nodes have been added or destroyed.
1039 1048
1040 1049 # XXX this is suboptimal when qrefresh'ing: we strip the current
1041 1050 # head, refresh the tag cache, then immediately add a new head.
1042 1051 # But I think doing it this way is necessary for the "instant
1043 1052 # tag cache retrieval" case to work.
1044 1053 self.invalidatecaches()
1045 1054
1046 1055 def walk(self, match, node=None):
1047 1056 '''
1048 1057 walk recursively through the directory tree or a given
1049 1058 changeset, finding all files matched by the match
1050 1059 function
1051 1060 '''
1052 1061 return self[node].walk(match)
1053 1062
1054 1063 def status(self, node1='.', node2=None, match=None,
1055 1064 ignored=False, clean=False, unknown=False,
1056 1065 listsubrepos=False):
1057 1066 """return status of files between two nodes or node and working directory
1058 1067
1059 1068 If node1 is None, use the first dirstate parent instead.
1060 1069 If node2 is None, compare node1 with working directory.
1061 1070 """
1062 1071
1063 1072 def mfmatches(ctx):
1064 1073 mf = ctx.manifest().copy()
1065 1074 for fn in mf.keys():
1066 1075 if not match(fn):
1067 1076 del mf[fn]
1068 1077 return mf
1069 1078
1070 1079 if isinstance(node1, context.changectx):
1071 1080 ctx1 = node1
1072 1081 else:
1073 1082 ctx1 = self[node1]
1074 1083 if isinstance(node2, context.changectx):
1075 1084 ctx2 = node2
1076 1085 else:
1077 1086 ctx2 = self[node2]
1078 1087
1079 1088 working = ctx2.rev() is None
1080 1089 parentworking = working and ctx1 == self['.']
1081 1090 match = match or matchmod.always(self.root, self.getcwd())
1082 1091 listignored, listclean, listunknown = ignored, clean, unknown
1083 1092
1084 1093 # load earliest manifest first for caching reasons
1085 1094 if not working and ctx2.rev() < ctx1.rev():
1086 1095 ctx2.manifest()
1087 1096
1088 1097 if not parentworking:
1089 1098 def bad(f, msg):
1090 1099 if f not in ctx1:
1091 1100 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1092 1101 match.bad = bad
1093 1102
1094 1103 if working: # we need to scan the working dir
1095 1104 subrepos = []
1096 1105 if '.hgsub' in self.dirstate:
1097 1106 subrepos = ctx1.substate.keys()
1098 1107 s = self.dirstate.status(match, subrepos, listignored,
1099 1108 listclean, listunknown)
1100 1109 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1101 1110
1102 1111 # check for any possibly clean files
1103 1112 if parentworking and cmp:
1104 1113 fixup = []
1105 1114 # do a full compare of any files that might have changed
1106 1115 for f in sorted(cmp):
1107 1116 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1108 1117 or ctx1[f].cmp(ctx2[f])):
1109 1118 modified.append(f)
1110 1119 else:
1111 1120 fixup.append(f)
1112 1121
1113 1122 # update dirstate for files that are actually clean
1114 1123 if fixup:
1115 1124 if listclean:
1116 1125 clean += fixup
1117 1126
1118 1127 try:
1119 1128 # updating the dirstate is optional
1120 1129 # so we don't wait on the lock
1121 1130 wlock = self.wlock(False)
1122 1131 try:
1123 1132 for f in fixup:
1124 1133 self.dirstate.normal(f)
1125 1134 finally:
1126 1135 wlock.release()
1127 1136 except error.LockError:
1128 1137 pass
1129 1138
1130 1139 if not parentworking:
1131 1140 mf1 = mfmatches(ctx1)
1132 1141 if working:
1133 1142 # we are comparing working dir against non-parent
1134 1143 # generate a pseudo-manifest for the working dir
1135 1144 mf2 = mfmatches(self['.'])
1136 1145 for f in cmp + modified + added:
1137 1146 mf2[f] = None
1138 1147 mf2.set(f, ctx2.flags(f))
1139 1148 for f in removed:
1140 1149 if f in mf2:
1141 1150 del mf2[f]
1142 1151 else:
1143 1152 # we are comparing two revisions
1144 1153 deleted, unknown, ignored = [], [], []
1145 1154 mf2 = mfmatches(ctx2)
1146 1155
1147 1156 modified, added, clean = [], [], []
1148 1157 for fn in mf2:
1149 1158 if fn in mf1:
1150 1159 if (mf1.flags(fn) != mf2.flags(fn) or
1151 1160 (mf1[fn] != mf2[fn] and
1152 1161 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1153 1162 modified.append(fn)
1154 1163 elif listclean:
1155 1164 clean.append(fn)
1156 1165 del mf1[fn]
1157 1166 else:
1158 1167 added.append(fn)
1159 1168 removed = mf1.keys()
1160 1169
1161 1170 r = modified, added, removed, deleted, unknown, ignored, clean
1162 1171
1163 1172 if listsubrepos:
1164 1173 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1165 1174 if working:
1166 1175 rev2 = None
1167 1176 else:
1168 1177 rev2 = ctx2.substate[subpath][1]
1169 1178 try:
1170 1179 submatch = matchmod.narrowmatcher(subpath, match)
1171 1180 s = sub.status(rev2, match=submatch, ignored=listignored,
1172 1181 clean=listclean, unknown=listunknown,
1173 1182 listsubrepos=True)
1174 1183 for rfiles, sfiles in zip(r, s):
1175 1184 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1176 1185 except error.LookupError:
1177 1186 self.ui.status(_("skipping missing subrepository: %s\n")
1178 1187 % subpath)
1179 1188
1180 1189 [l.sort() for l in r]
1181 1190 return r
1182 1191
1183 1192 def heads(self, start=None):
1184 1193 heads = self.changelog.heads(start)
1185 1194 # sort the output in rev descending order
1186 1195 heads = [(-self.changelog.rev(h), h) for h in heads]
1187 1196 return [n for (r, n) in sorted(heads)]
1188 1197
1189 1198 def branchheads(self, branch=None, start=None, closed=False):
1190 1199 '''return a (possibly filtered) list of heads for the given branch
1191 1200
1192 1201 Heads are returned in topological order, from newest to oldest.
1193 1202 If branch is None, use the dirstate branch.
1194 1203 If start is not None, return only heads reachable from start.
1195 1204 If closed is True, return heads that are marked as closed as well.
1196 1205 '''
1197 1206 if branch is None:
1198 1207 branch = self[None].branch()
1199 1208 branches = self.branchmap()
1200 1209 if branch not in branches:
1201 1210 return []
1202 1211 # the cache returns heads ordered lowest to highest
1203 1212 bheads = list(reversed(branches[branch]))
1204 1213 if start is not None:
1205 1214 # filter out the heads that cannot be reached from startrev
1206 1215 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1207 1216 bheads = [h for h in bheads if h in fbheads]
1208 1217 if not closed:
1209 1218 bheads = [h for h in bheads if
1210 1219 ('close' not in self.changelog.read(h)[5])]
1211 1220 return bheads
1212 1221
1213 1222 def branches(self, nodes):
1214 1223 if not nodes:
1215 1224 nodes = [self.changelog.tip()]
1216 1225 b = []
1217 1226 for n in nodes:
1218 1227 t = n
1219 1228 while 1:
1220 1229 p = self.changelog.parents(n)
1221 1230 if p[1] != nullid or p[0] == nullid:
1222 1231 b.append((t, n, p[0], p[1]))
1223 1232 break
1224 1233 n = p[0]
1225 1234 return b
1226 1235
1227 1236 def between(self, pairs):
1228 1237 r = []
1229 1238
1230 1239 for top, bottom in pairs:
1231 1240 n, l, i = top, [], 0
1232 1241 f = 1
1233 1242
1234 1243 while n != bottom and n != nullid:
1235 1244 p = self.changelog.parents(n)[0]
1236 1245 if i == f:
1237 1246 l.append(n)
1238 1247 f = f * 2
1239 1248 n = p
1240 1249 i += 1
1241 1250
1242 1251 r.append(l)
1243 1252
1244 1253 return r
1245 1254
1246 1255 def pull(self, remote, heads=None, force=False):
1247 1256 lock = self.lock()
1248 1257 try:
1249 1258 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1250 1259 force=force)
1251 1260 common, fetch, rheads = tmp
1252 1261 if not fetch:
1253 1262 self.ui.status(_("no changes found\n"))
1254 1263 return 0
1255 1264
1256 1265 if fetch == [nullid]:
1257 1266 self.ui.status(_("requesting all changes\n"))
1258 1267 elif heads is None and remote.capable('changegroupsubset'):
1259 1268 # issue1320, avoid a race if remote changed after discovery
1260 1269 heads = rheads
1261 1270
1262 1271 if heads is None:
1263 1272 cg = remote.changegroup(fetch, 'pull')
1264 1273 else:
1265 1274 if not remote.capable('changegroupsubset'):
1266 1275 raise util.Abort(_("partial pull cannot be done because "
1267 1276 "other repository doesn't support "
1268 1277 "changegroupsubset."))
1269 1278 cg = remote.changegroupsubset(fetch, heads, 'pull')
1270 1279 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1271 1280 finally:
1272 1281 lock.release()
1273 1282
1274 1283 def push(self, remote, force=False, revs=None, newbranch=False):
1275 1284 '''Push outgoing changesets (limited by revs) from the current
1276 1285 repository to remote. Return an integer:
1277 1286 - 0 means HTTP error *or* nothing to push
1278 1287 - 1 means we pushed and remote head count is unchanged *or*
1279 1288 we have outgoing changesets but refused to push
1280 1289 - other values as described by addchangegroup()
1281 1290 '''
1282 1291 # there are two ways to push to remote repo:
1283 1292 #
1284 1293 # addchangegroup assumes local user can lock remote
1285 1294 # repo (local filesystem, old ssh servers).
1286 1295 #
1287 1296 # unbundle assumes local user cannot lock remote repo (new ssh
1288 1297 # servers, http servers).
1289 1298
1290 1299 lock = None
1291 1300 unbundle = remote.capable('unbundle')
1292 1301 if not unbundle:
1293 1302 lock = remote.lock()
1294 1303 try:
1295 1304 ret = discovery.prepush(self, remote, force, revs, newbranch)
1296 1305 if ret[0] is None:
1297 1306 # and here we return 0 for "nothing to push" or 1 for
1298 1307 # "something to push but I refuse"
1299 1308 return ret[1]
1300 1309
1301 1310 cg, remote_heads = ret
1302 1311 if unbundle:
1303 1312 # local repo finds heads on server, finds out what revs it must
1304 1313 # push. once revs transferred, if server finds it has
1305 1314 # different heads (someone else won commit/push race), server
1306 1315 # aborts.
1307 1316 if force:
1308 1317 remote_heads = ['force']
1309 1318 # ssh: return remote's addchangegroup()
1310 1319 # http: return remote's addchangegroup() or 0 for error
1311 1320 return remote.unbundle(cg, remote_heads, 'push')
1312 1321 else:
1313 1322 # we return an integer indicating remote head count change
1314 1323 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1315 1324 finally:
1316 1325 if lock is not None:
1317 1326 lock.release()
1318 1327
1319 1328 def changegroupinfo(self, nodes, source):
1320 1329 if self.ui.verbose or source == 'bundle':
1321 1330 self.ui.status(_("%d changesets found\n") % len(nodes))
1322 1331 if self.ui.debugflag:
1323 1332 self.ui.debug("list of changesets:\n")
1324 1333 for node in nodes:
1325 1334 self.ui.debug("%s\n" % hex(node))
1326 1335
1327 1336 def changegroupsubset(self, bases, heads, source, extranodes=None):
1328 1337 """Compute a changegroup consisting of all the nodes that are
1329 1338 descendants of any of the bases and ancestors of any of the heads.
1330 1339 Return a chunkbuffer object whose read() method will return
1331 1340 successive changegroup chunks.
1332 1341
1333 1342 It is fairly complex as determining which filenodes and which
1334 1343 manifest nodes need to be included for the changeset to be complete
1335 1344 is non-trivial.
1336 1345
1337 1346 Another wrinkle is doing the reverse, figuring out which changeset in
1338 1347 the changegroup a particular filenode or manifestnode belongs to.
1339 1348
1340 1349 The caller can specify some nodes that must be included in the
1341 1350 changegroup using the extranodes argument. It should be a dict
1342 1351 where the keys are the filenames (or 1 for the manifest), and the
1343 1352 values are lists of (node, linknode) tuples, where node is a wanted
1344 1353 node and linknode is the changelog node that should be transmitted as
1345 1354 the linkrev.
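For example (with illustrative node names), an extranodes value of
{1: [(mnode, clnode)], 'foo': [(fnode, clnode)]} would force manifest
node mnode and the 'foo' filenode fnode into the changegroup, each
transmitted with clnode as its linkrev.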
1346 1355 """
1347 1356
1348 1357 # Set up some initial variables
1349 1358 # Make it easy to refer to self.changelog
1350 1359 cl = self.changelog
1351 1360 # Compute the list of changesets in this changegroup.
1352 1361 # Some bases may turn out to be superfluous, and some heads may be
1353 1362 # too. nodesbetween will return the minimal set of bases and heads
1354 1363 # necessary to re-create the changegroup.
1355 1364 if not bases:
1356 1365 bases = [nullid]
1357 1366 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1358 1367
1359 1368 if extranodes is None:
1360 1369 # can we go through the fast path ?
1361 1370 heads.sort()
1362 1371 allheads = self.heads()
1363 1372 allheads.sort()
1364 1373 if heads == allheads:
1365 1374 return self._changegroup(msng_cl_lst, source)
1366 1375
1367 1376 # slow path
1368 1377 self.hook('preoutgoing', throw=True, source=source)
1369 1378
1370 1379 self.changegroupinfo(msng_cl_lst, source)
1371 1380
1372 1381 # We assume that all ancestors of bases are known
1373 1382 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1374 1383
1375 1384 # Make it easy to refer to self.manifest
1376 1385 mnfst = self.manifest
1377 1386 # We don't know which manifests are missing yet
1378 1387 msng_mnfst_set = {}
1379 1388 # Nor do we know which filenodes are missing.
1380 1389 msng_filenode_set = {}
1381 1390
1382 1391 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1383 1392 junk = None
1384 1393
1385 1394 # A changeset always belongs to itself, so the changenode lookup
1386 1395 # function for a changenode is identity.
1387 1396 def identity(x):
1388 1397 return x
1389 1398
1390 1399 # A function-generating function that sets up the initial environment
1391 1400 # for the inner function.
1392 1401 def filenode_collector(changedfiles):
1393 1402 # This gathers information from each manifestnode included in the
1394 1403 # changegroup about which filenodes the manifest node references
1395 1404 # so we can include those in the changegroup too.
1396 1405 #
1397 1406 # It also remembers which changenode each filenode belongs to. It
1398 1407 # does this by assuming that a filenode belongs to the same
1399 1408 # changenode as the first manifest that references it.
1400 1409 def collect_msng_filenodes(mnfstnode):
1401 1410 r = mnfst.rev(mnfstnode)
1402 1411 if r - 1 in mnfst.parentrevs(r):
1403 1412 # If the previous rev is one of the parents,
1404 1413 # we only need to see a diff.
1405 1414 deltamf = mnfst.readdelta(mnfstnode)
1406 1415 # For each line in the delta
1407 1416 for f, fnode in deltamf.iteritems():
1408 1417 # And if the file is in the list of files we care
1409 1418 # about.
1410 1419 if f in changedfiles:
1411 1420 # Get the changenode this manifest belongs to
1412 1421 clnode = msng_mnfst_set[mnfstnode]
1413 1422 # Create the set of filenodes for the file if
1414 1423 # there isn't one already.
1415 1424 ndset = msng_filenode_set.setdefault(f, {})
1416 1425 # And set the filenode's changelog node to the
1417 1426 # manifest's if it hasn't been set already.
1418 1427 ndset.setdefault(fnode, clnode)
1419 1428 else:
1420 1429 # Otherwise we need a full manifest.
1421 1430 m = mnfst.read(mnfstnode)
1422 1431 # For every file we care about.
1423 1432 for f in changedfiles:
1424 1433 fnode = m.get(f, None)
1425 1434 # If it's in the manifest
1426 1435 if fnode is not None:
1427 1436 # See comments above.
1428 1437 clnode = msng_mnfst_set[mnfstnode]
1429 1438 ndset = msng_filenode_set.setdefault(f, {})
1430 1439 ndset.setdefault(fnode, clnode)
1431 1440 return collect_msng_filenodes
1432 1441
1433 1442 # If we determine that a particular file or manifest node must be a
1434 1443 # node that the recipient of the changegroup will already have, we can
1435 1444 # also assume the recipient will have all the parents. This function
1436 1445 # prunes them from the set of missing nodes.
1437 1446 def prune(revlog, missingnodes):
1438 1447 hasset = set()
1439 1448 # If a 'missing' filenode thinks it belongs to a changenode we
1440 1449 # assume the recipient must have, then the recipient must have
1441 1450 # that filenode.
1442 1451 for n in missingnodes:
1443 1452 clrev = revlog.linkrev(revlog.rev(n))
1444 1453 if clrev in commonrevs:
1445 1454 hasset.add(n)
1446 1455 for n in hasset:
1447 1456 missingnodes.pop(n, None)
1448 1457 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1449 1458 missingnodes.pop(revlog.node(r), None)
1450 1459
1451 1460 # Add the nodes that were explicitly requested.
1452 1461 def add_extra_nodes(name, nodes):
1453 1462 if not extranodes or name not in extranodes:
1454 1463 return
1455 1464
1456 1465 for node, linknode in extranodes[name]:
1457 1466 if node not in nodes:
1458 1467 nodes[node] = linknode
1459 1468
1460 1469 # Now that we have all these utility functions to help out and
1461 1470 # logically divide up the task, generate the group.
1462 1471 def gengroup():
1463 1472 # The set of changed files starts empty.
1464 1473 changedfiles = set()
1465 1474 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1466 1475
1467 1476 # Create a changenode group generator that will call our functions
1468 1477 # back to lookup the owning changenode and collect information.
1469 1478 group = cl.group(msng_cl_lst, identity, collect)
1470 1479 for cnt, chnk in enumerate(group):
1471 1480 yield chnk
1472 1481 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1473 1482 self.ui.progress(_('bundling changes'), None)
1474 1483
1475 1484 prune(mnfst, msng_mnfst_set)
1476 1485 add_extra_nodes(1, msng_mnfst_set)
1477 1486 msng_mnfst_lst = msng_mnfst_set.keys()
1478 1487 # Sort the manifestnodes by revision number.
1479 1488 msng_mnfst_lst.sort(key=mnfst.rev)
1480 1489 # Create a generator for the manifestnodes that calls our lookup
1481 1490 # and data collection functions back.
1482 1491 group = mnfst.group(msng_mnfst_lst,
1483 1492 lambda mnode: msng_mnfst_set[mnode],
1484 1493 filenode_collector(changedfiles))
1485 1494 for cnt, chnk in enumerate(group):
1486 1495 yield chnk
1487 1496 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1488 1497 self.ui.progress(_('bundling manifests'), None)
1489 1498
1490 1499 # These are no longer needed, dereference and toss the memory for
1491 1500 # them.
1492 1501 msng_mnfst_lst = None
1493 1502 msng_mnfst_set.clear()
1494 1503
1495 1504 if extranodes:
1496 1505 for fname in extranodes:
1497 1506 if isinstance(fname, int):
1498 1507 continue
1499 1508 msng_filenode_set.setdefault(fname, {})
1500 1509 changedfiles.add(fname)
1501 1510 # Go through all our files in order sorted by name.
1502 1511 cnt = 0
1503 1512 for fname in sorted(changedfiles):
1504 1513 filerevlog = self.file(fname)
1505 1514 if not len(filerevlog):
1506 1515 raise util.Abort(_("empty or missing revlog for %s") % fname)
1507 1516 # Toss out the filenodes that the recipient isn't really
1508 1517 # missing.
1509 1518 missingfnodes = msng_filenode_set.pop(fname, {})
1510 1519 prune(filerevlog, missingfnodes)
1511 1520 add_extra_nodes(fname, missingfnodes)
1512 1521 # If any filenodes are left, generate the group for them,
1513 1522 # otherwise don't bother.
1514 1523 if missingfnodes:
1515 1524 yield changegroup.chunkheader(len(fname))
1516 1525 yield fname
1517 1526 # Sort the filenodes by their revision # (topological order)
1518 1527 nodeiter = list(missingfnodes)
1519 1528 nodeiter.sort(key=filerevlog.rev)
1520 1529 # Create a group generator and only pass in a changenode
1521 1530 # lookup function as we need to collect no information
1522 1531 # from filenodes.
1523 1532 group = filerevlog.group(nodeiter,
1524 1533 lambda fnode: missingfnodes[fnode])
1525 1534 for chnk in group:
1526 1535 self.ui.progress(
1527 1536 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1528 1537 cnt += 1
1529 1538 yield chnk
1530 1539 # Signal that no more groups are left.
1531 1540 yield changegroup.closechunk()
1532 1541 self.ui.progress(_('bundling files'), None)
1533 1542
1534 1543 if msng_cl_lst:
1535 1544 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1536 1545
1537 1546 return util.chunkbuffer(gengroup())
1538 1547
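# Editor's note (not in the original source): pieced together from the
# yields in gengroup() above, the emitted stream is, roughly:
#
#   <changelog group chunks>
#   <manifest group chunks>
#   for each changed file:
#       chunkheader(len(fname)) + fname + <filelog group chunks>
#   closechunk()    # empty chunk signalling no more file groups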
1539 1548 def changegroup(self, basenodes, source):
1540 1549 # to avoid a race we use changegroupsubset() (issue1320)
1541 1550 return self.changegroupsubset(basenodes, self.heads(), source)
1542 1551
1543 1552 def _changegroup(self, nodes, source):
1544 1553 """Compute the changegroup of all nodes that we have that a recipient
1545 1554 doesn't. Return a chunkbuffer object whose read() method will return
1546 1555 successive changegroup chunks.
1547 1556
1548 1557 This is much easier than the previous function as we can assume that
1549 1558 the recipient already has every changenode we aren't sending them.
1550 1559
1551 1560 nodes is the set of nodes to send"""
1552 1561
1553 1562 self.hook('preoutgoing', throw=True, source=source)
1554 1563
1555 1564 cl = self.changelog
1556 1565 revset = set([cl.rev(n) for n in nodes])
1557 1566 self.changegroupinfo(nodes, source)
1558 1567
1559 1568 def identity(x):
1560 1569 return x
1561 1570
1562 1571 def gennodelst(log):
1563 1572 for r in log:
1564 1573 if log.linkrev(r) in revset:
1565 1574 yield log.node(r)
1566 1575
1567 1576 def lookuplinkrev_func(revlog):
1568 1577 def lookuplinkrev(n):
1569 1578 return cl.node(revlog.linkrev(revlog.rev(n)))
1570 1579 return lookuplinkrev
1571 1580
1572 1581 def gengroup():
1573 1582 '''yield a sequence of changegroup chunks (strings)'''
1574 1583 # construct a list of all changed files
1575 1584 changedfiles = set()
1576 1585 mmfs = {}
1577 1586 collect = changegroup.collector(cl, mmfs, changedfiles)
1578 1587
1579 1588 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1580 1589 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1581 1590 yield chnk
1582 1591 self.ui.progress(_('bundling changes'), None)
1583 1592
1584 1593 mnfst = self.manifest
1585 1594 nodeiter = gennodelst(mnfst)
1586 1595 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1587 1596 lookuplinkrev_func(mnfst))):
1588 1597 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1589 1598 yield chnk
1590 1599 self.ui.progress(_('bundling manifests'), None)
1591 1600
1592 1601 cnt = 0
1593 1602 for fname in sorted(changedfiles):
1594 1603 filerevlog = self.file(fname)
1595 1604 if not len(filerevlog):
1596 1605 raise util.Abort(_("empty or missing revlog for %s") % fname)
1597 1606 nodeiter = gennodelst(filerevlog)
1598 1607 nodeiter = list(nodeiter)
1599 1608 if nodeiter:
1600 1609 yield changegroup.chunkheader(len(fname))
1601 1610 yield fname
1602 1611 lookup = lookuplinkrev_func(filerevlog)
1603 1612 for chnk in filerevlog.group(nodeiter, lookup):
1604 1613 self.ui.progress(
1605 1614 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1606 1615 cnt += 1
1607 1616 yield chnk
1608 1617 self.ui.progress(_('bundling files'), None)
1609 1618
1610 1619 yield changegroup.closechunk()
1611 1620
1612 1621 if nodes:
1613 1622 self.hook('outgoing', node=hex(nodes[0]), source=source)
1614 1623
1615 1624 return util.chunkbuffer(gengroup())
1616 1625
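# Editor's sketch (hypothetical caller, not in the original source): the
# chunkbuffer returned above is file-like, so a caller could stream the
# changegroup out in fixed-size blocks:
#
#   cg = repo._changegroup(nodes, 'bundle')
#   while True:
#       data = cg.read(65536)
#       if not data:
#           break
#       out.write(data)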
1617 1626 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1618 1627 """Add the changegroup returned by source.read() to this repo.
1619 1628 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1620 1629 the URL of the repo where this changegroup is coming from.
1621 1630
1622 1631 Return an integer summarizing the change to this repo:
1623 1632 - nothing changed or no source: 0
1624 1633 - more heads than before: 1+added heads (2..n)
1625 1634 - fewer heads than before: -1-removed heads (-2..-n)
1626 1635 - number of heads stays the same: 1
1627 1636 """
1628 1637 def csmap(x):
1629 1638 self.ui.debug("add changeset %s\n" % short(x))
1630 1639 return len(cl)
1631 1640
1632 1641 def revmap(x):
1633 1642 return cl.rev(x)
1634 1643
1635 1644 if not source:
1636 1645 return 0
1637 1646
1638 1647 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1639 1648
1640 1649 changesets = files = revisions = 0
1641 1650 efiles = set()
1642 1651
1643 1652 # write changelog data to temp files so concurrent readers
1644 1653 # will not see an inconsistent view
1645 1654 cl = self.changelog
1646 1655 cl.delayupdate()
1647 1656 oldheads = len(cl.heads())
1648 1657
1649 1658 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1650 1659 try:
1651 1660 trp = weakref.proxy(tr)
1652 1661 # pull off the changeset group
1653 1662 self.ui.status(_("adding changesets\n"))
1654 1663 clstart = len(cl)
1655 1664 class prog(object):
1656 1665 step = _('changesets')
1657 1666 count = 1
1658 1667 ui = self.ui
1659 1668 total = None
1660 1669 def __call__(self):
1661 1670 self.ui.progress(self.step, self.count, unit=_('chunks'),
1662 1671 total=self.total)
1663 1672 self.count += 1
1664 1673 pr = prog()
1665 1674 chunkiter = changegroup.chunkiter(source, progress=pr)
1666 1675 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1667 1676 raise util.Abort(_("received changelog group is empty"))
1668 1677 clend = len(cl)
1669 1678 changesets = clend - clstart
1670 1679 for c in xrange(clstart, clend):
1671 1680 efiles.update(self[c].files())
1672 1681 efiles = len(efiles)
1673 1682 self.ui.progress(_('changesets'), None)
1674 1683
1675 1684 # pull off the manifest group
1676 1685 self.ui.status(_("adding manifests\n"))
1677 1686 pr.step = _('manifests')
1678 1687 pr.count = 1
1679 1688 pr.total = changesets # manifests <= changesets
1680 1689 chunkiter = changegroup.chunkiter(source, progress=pr)
1681 1690 # no need to check for empty manifest group here:
1682 1691 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1683 1692 # no new manifest will be created and the manifest group will
1684 1693 # be empty during the pull
1685 1694 self.manifest.addgroup(chunkiter, revmap, trp)
1686 1695 self.ui.progress(_('manifests'), None)
1687 1696
1688 1697 needfiles = {}
1689 1698 if self.ui.configbool('server', 'validate', default=False):
1690 1699 # validate incoming csets have their manifests
1691 1700 for cset in xrange(clstart, clend):
1692 1701 mfest = self.changelog.read(self.changelog.node(cset))[0]
1693 1702 mfest = self.manifest.readdelta(mfest)
1694 1703 # store file nodes we must see
1695 1704 for f, n in mfest.iteritems():
1696 1705 needfiles.setdefault(f, set()).add(n)
1697 1706
1698 1707 # process the files
1699 1708 self.ui.status(_("adding file changes\n"))
1700 1709 pr.step = _('files')
1701 1710 pr.count = 1
1702 1711 pr.total = efiles
1703 1712 while True:
1704 1713 f = changegroup.getchunk(source)
1705 1714 if not f:
1706 1715 break
1707 1716 self.ui.debug("adding %s revisions\n" % f)
1708 1717 pr()
1709 1718 fl = self.file(f)
1710 1719 o = len(fl)
1711 1720 chunkiter = changegroup.chunkiter(source)
1712 1721 if fl.addgroup(chunkiter, revmap, trp) is None:
1713 1722 raise util.Abort(_("received file revlog group is empty"))
1714 1723 revisions += len(fl) - o
1715 1724 files += 1
1716 1725 if f in needfiles:
1717 1726 needs = needfiles[f]
1718 1727 for new in xrange(o, len(fl)):
1719 1728 n = fl.node(new)
1720 1729 if n in needs:
1721 1730 needs.remove(n)
1722 1731 if not needs:
1723 1732 del needfiles[f]
1724 1733 self.ui.progress(_('files'), None)
1725 1734
1726 1735 for f, needs in needfiles.iteritems():
1727 1736 fl = self.file(f)
1728 1737 for n in needs:
1729 1738 try:
1730 1739 fl.rev(n)
1731 1740 except error.LookupError:
1732 1741 raise util.Abort(
1733 1742 _('missing file data for %s:%s - run hg verify') %
1734 1743 (f, hex(n)))
1735 1744
1736 1745 newheads = len(cl.heads())
1737 1746 heads = ""
1738 1747 if oldheads and newheads != oldheads:
1739 1748 heads = _(" (%+d heads)") % (newheads - oldheads)
1740 1749
1741 1750 self.ui.status(_("added %d changesets"
1742 1751 " with %d changes to %d files%s\n")
1743 1752 % (changesets, revisions, files, heads))
1744 1753
1745 1754 if changesets > 0:
1746 1755 p = lambda: cl.writepending() and self.root or ""
1747 1756 self.hook('pretxnchangegroup', throw=True,
1748 1757 node=hex(cl.node(clstart)), source=srctype,
1749 1758 url=url, pending=p)
1750 1759
1751 1760 # make changelog see real files again
1752 1761 cl.finalize(trp)
1753 1762
1754 1763 tr.close()
1755 1764 finally:
1756 1765 tr.release()
1757 1766 if lock:
1758 1767 lock.release()
1759 1768
1760 1769 if changesets > 0:
1761 1770 # forcefully update the on-disk branch cache
1762 1771 self.ui.debug("updating the branch cache\n")
1763 1772 self.updatebranchcache()
1764 1773 self.hook("changegroup", node=hex(cl.node(clstart)),
1765 1774 source=srctype, url=url)
1766 1775
1767 1776 for i in xrange(clstart, clend):
1768 1777 self.hook("incoming", node=hex(cl.node(i)),
1769 1778 source=srctype, url=url)
1770 1779
1771 1780 # never return 0 here:
1772 1781 if newheads < oldheads:
1773 1782 return newheads - oldheads - 1
1774 1783 else:
1775 1784 return newheads - oldheads + 1
1776 1785
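# Editor's sketch (hypothetical call site, not in the original source):
# decoding the integer returned by addchangegroup(), per its docstring:
#
#   ret = repo.addchangegroup(source, 'pull', url)
#   if ret == 0:
#       pass                # nothing changed, or no source
#   elif ret > 1:
#       added = ret - 1     # that many new heads
#   elif ret < 0:
#       removed = -ret - 1  # that many heads removed
#   # ret == 1: changesets added, head count unchanged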
1777 1786
1778 1787 def stream_in(self, remote):
1779 1788 fp = remote.stream_out()
1780 1789 l = fp.readline()
1781 1790 try:
1782 1791 resp = int(l)
1783 1792 except ValueError:
1784 1793 raise error.ResponseError(
1785 1794 _('Unexpected response from remote server:'), l)
1786 1795 if resp == 1:
1787 1796 raise util.Abort(_('operation forbidden by server'))
1788 1797 elif resp == 2:
1789 1798 raise util.Abort(_('locking the remote repository failed'))
1790 1799 elif resp != 0:
1791 1800 raise util.Abort(_('the server sent an unknown error code'))
1792 1801 self.ui.status(_('streaming all changes\n'))
1793 1802 l = fp.readline()
1794 1803 try:
1795 1804 total_files, total_bytes = map(int, l.split(' ', 1))
1796 1805 except (ValueError, TypeError):
1797 1806 raise error.ResponseError(
1798 1807 _('Unexpected response from remote server:'), l)
1799 1808 self.ui.status(_('%d files to transfer, %s of data\n') %
1800 1809 (total_files, util.bytecount(total_bytes)))
1801 1810 start = time.time()
1802 1811 for i in xrange(total_files):
1803 1812 # XXX doesn't support '\n' or '\r' in filenames
1804 1813 l = fp.readline()
1805 1814 try:
1806 1815 name, size = l.split('\0', 1)
1807 1816 size = int(size)
1808 1817 except (ValueError, TypeError):
1809 1818 raise error.ResponseError(
1810 1819 _('Unexpected response from remote server:'), l)
1811 1820 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1812 1821 # for backwards compat, name was partially encoded
1813 1822 ofp = self.sopener(store.decodedir(name), 'w')
1814 1823 for chunk in util.filechunkiter(fp, limit=size):
1815 1824 ofp.write(chunk)
1816 1825 ofp.close()
1817 1826 elapsed = time.time() - start
1818 1827 if elapsed <= 0:
1819 1828 elapsed = 0.001
1820 1829 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1821 1830 (util.bytecount(total_bytes), elapsed,
1822 1831 util.bytecount(total_bytes / elapsed)))
1823 1832 self.invalidate()
1824 1833 return len(self.heads()) + 1
1825 1834
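# Editor's note (inferred from the parsing in stream_in() above, not
# authoritative): the 'stream_out' response it consumes looks like:
#
#   <status>\n                       # 0 ok, 1 forbidden, 2 lock failed
#   <total_files> <total_bytes>\n
#   then, per file:
#   <store path>\0<size>\n           # followed by <size> bytes of data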
1826 1835 def clone(self, remote, heads=[], stream=False):
1827 1836 '''clone remote repository.
1828 1837
1829 1838 keyword arguments:
1830 1839 heads: list of revs to clone (forces use of pull)
1831 1840 stream: use streaming clone if possible'''
1832 1841
1833 1842 # now, all clients that can request uncompressed clones can
1834 1843 # read repo formats supported by all servers that can serve
1835 1844 # them.
1836 1845
1837 1846 # if revlog format changes, client will have to check version
1838 1847 # and format flags on "stream" capability, and use
1839 1848 # uncompressed only if compatible.
1840 1849
1841 1850 if stream and not heads and remote.capable('stream'):
1842 1851 return self.stream_in(remote)
1843 1852 return self.pull(remote, heads)
1844 1853
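# Editor's sketch (hypothetical usage): clone() streams only when no
# specific heads are requested and the server advertises 'stream';
# otherwise it falls back to pull:
#
#   repo.clone(remote, stream=True)     # streaming clone if possible
#   repo.clone(remote, heads=[node])    # forces the pull path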
1845 1854 def pushkey(self, namespace, key, old, new):
1846 1855 return pushkey.push(self, namespace, key, old, new)
1847 1856
1848 1857 def listkeys(self, namespace):
1849 1858 return pushkey.list(self, namespace)
1850 1859
1851 1860 # used to avoid circular references so destructors work
1852 1861 def aftertrans(files):
1853 1862 renamefiles = [tuple(t) for t in files]
1854 1863 def a():
1855 1864 for src, dest in renamefiles:
1856 1865 util.rename(src, dest)
1857 1866 return a
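# Editor's sketch (hypothetical values): aftertrans() returns a closure
# that performs the queued renames when called, e.g.:
#
#   a = aftertrans([('journal.tmp', 'journal')])
#   a()    # renames journal.tmp -> journal via util.rename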
1858 1867
1859 1868 def instance(ui, path, create):
1860 1869 return localrepository(ui, util.drop_scheme('file', path), create)
1861 1870
1862 1871 def islocal(path):
1863 1872 return True