localrepo: move string formatting out of gettext call
Martin Geisler
r13037:9beac11b default
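The change itself is small but meaningful for translators: the `%` operator must be applied to the result of `_()`, not inside it, otherwise the already-formatted string is used as the catalog key and never matches any translatable msgid. A minimal, self-contained sketch (stand-in `_` and catalog for illustration, not Mercurial's real i18n machinery):

    # Stand-in message catalog; the real code resolves _() via gettext.
    catalog = {"hello %s!\n": "hallo %s!\n"}

    def _(msgid):
        return catalog.get(msgid, msgid)

    name = "world"
    assert _("hello %s!\n" % name) == "hello world!\n"  # wrong: lookup misses
    assert _("hello %s!\n") % name == "hallo world!\n"  # right: msgid found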
@@ -1,1916 +1,1916 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supportedformats = set(('revlogv1', 'parentdelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=0):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = util.path_auditor(self.root, self._checknested)
34 34 self.opener = util.opener(self.path)
35 35 self.wopener = util.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 os.mkdir(self.path)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener("00changelog.i", "a").write(
60 60 '\0\0\0\2' # represents revlogv2
61 61 ' dummy changelog to prevent using the old repo layout'
62 62 )
63 63 if self.ui.configbool('format', 'parentdelta', False):
64 64 requirements.append("parentdelta")
65 65 else:
66 66 raise error.RepoError(_("repository %s not found") % path)
67 67 elif create:
68 68 raise error.RepoError(_("repository %s already exists") % path)
69 69 else:
70 70 # find requirements
71 71 requirements = set()
72 72 try:
73 73 requirements = set(self.opener("requires").read().splitlines())
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 for r in requirements - self.supported:
78 78 raise error.RepoError(_("requirement '%s' not supported") % r)
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener("sharedpath").read())
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None # in UTF-8
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118 if 'parentdelta' in requirements:
119 119 self.sopener.options['parentdelta'] = 1
120 120
121 121 def _writerequirements(self):
122 122 reqfile = self.opener("requires", "w")
123 123 for r in self.requirements:
124 124 reqfile.write("%s\n" % r)
125 125 reqfile.close()
126 126
127 127 def _checknested(self, path):
128 128 """Determine if path is a legal nested repository."""
129 129 if not path.startswith(self.root):
130 130 return False
131 131 subpath = path[len(self.root) + 1:]
132 132
133 133 # XXX: Checking against the current working copy is wrong in
134 134 # the sense that it can reject things like
135 135 #
136 136 # $ hg cat -r 10 sub/x.txt
137 137 #
138 138 # if sub/ is no longer a subrepository in the working copy
139 139 # parent revision.
140 140 #
141 141 # However, it can of course also allow things that would have
142 142 # been rejected before, such as the above cat command if sub/
143 143 # is a subrepository now, but was a normal directory before.
144 144 # The old path auditor would have rejected by mistake since it
145 145 # panics when it sees sub/.hg/.
146 146 #
147 147 # All in all, checking against the working copy seems sensible
148 148 # since we want to prevent access to nested repositories on
149 149 # the filesystem *now*.
150 150 ctx = self[None]
151 151 parts = util.splitpath(subpath)
152 152 while parts:
153 153 prefix = os.sep.join(parts)
154 154 if prefix in ctx.substate:
155 155 if prefix == subpath:
156 156 return True
157 157 else:
158 158 sub = ctx.sub(prefix)
159 159 return sub.checknested(subpath[len(prefix) + 1:])
160 160 else:
161 161 parts.pop()
162 162 return False
163 163
164 164
165 165 @propertycache
166 166 def changelog(self):
167 167 c = changelog.changelog(self.sopener)
168 168 if 'HG_PENDING' in os.environ:
169 169 p = os.environ['HG_PENDING']
170 170 if p.startswith(self.root):
171 171 c.readpending('00changelog.i.a')
172 172 self.sopener.options['defversion'] = c.version
173 173 return c
174 174
175 175 @propertycache
176 176 def manifest(self):
177 177 return manifest.manifest(self.sopener)
178 178
179 179 @propertycache
180 180 def dirstate(self):
181 181 warned = [0]
182 182 def validate(node):
183 183 try:
184 184 r = self.changelog.rev(node)
185 185 return node
186 186 except error.LookupError:
187 187 if not warned[0]:
188 188 warned[0] = True
189 189 self.ui.warn(_("warning: ignoring unknown"
190 " working parent %s!\n" % short(node)))
190 " working parent %s!\n") % short(node))
191 191 return nullid
192 192
193 193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
194 194
195 195 def __getitem__(self, changeid):
196 196 if changeid is None:
197 197 return context.workingctx(self)
198 198 return context.changectx(self, changeid)
199 199
200 200 def __contains__(self, changeid):
201 201 try:
202 202 return bool(self.lookup(changeid))
203 203 except error.RepoLookupError:
204 204 return False
205 205
206 206 def __nonzero__(self):
207 207 return True
208 208
209 209 def __len__(self):
210 210 return len(self.changelog)
211 211
212 212 def __iter__(self):
213 213 for i in xrange(len(self)):
214 214 yield i
215 215
216 216 def url(self):
217 217 return 'file:' + self.root
218 218
219 219 def hook(self, name, throw=False, **args):
220 220 return hook.hook(self.ui, self, name, throw, **args)
221 221
222 222 tag_disallowed = ':\r\n'
223 223
224 224 def _tag(self, names, node, message, local, user, date, extra={}):
225 225 if isinstance(names, str):
226 226 allchars = names
227 227 names = (names,)
228 228 else:
229 229 allchars = ''.join(names)
230 230 for c in self.tag_disallowed:
231 231 if c in allchars:
232 232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233 233
234 234 branches = self.branchmap()
235 235 for name in names:
236 236 self.hook('pretag', throw=True, node=hex(node), tag=name,
237 237 local=local)
238 238 if name in branches:
239 239 self.ui.warn(_("warning: tag %s conflicts with existing"
240 240 " branch name\n") % name)
241 241
242 242 def writetags(fp, names, munge, prevtags):
243 243 fp.seek(0, 2)
244 244 if prevtags and prevtags[-1] != '\n':
245 245 fp.write('\n')
246 246 for name in names:
247 247 m = munge and munge(name) or name
248 248 if self._tagtypes and name in self._tagtypes:
249 249 old = self._tags.get(name, nullid)
250 250 fp.write('%s %s\n' % (hex(old), m))
251 251 fp.write('%s %s\n' % (hex(node), m))
252 252 fp.close()
253 253
254 254 prevtags = ''
255 255 if local:
256 256 try:
257 257 fp = self.opener('localtags', 'r+')
258 258 except IOError:
259 259 fp = self.opener('localtags', 'a')
260 260 else:
261 261 prevtags = fp.read()
262 262
263 263 # local tags are stored in the current charset
264 264 writetags(fp, names, None, prevtags)
265 265 for name in names:
266 266 self.hook('tag', node=hex(node), tag=name, local=local)
267 267 return
268 268
269 269 try:
270 270 fp = self.wfile('.hgtags', 'rb+')
271 271 except IOError:
272 272 fp = self.wfile('.hgtags', 'ab')
273 273 else:
274 274 prevtags = fp.read()
275 275
276 276 # committed tags are stored in UTF-8
277 277 writetags(fp, names, encoding.fromlocal, prevtags)
278 278
279 279 if '.hgtags' not in self.dirstate:
280 280 self[None].add(['.hgtags'])
281 281
282 282 m = matchmod.exact(self.root, '', ['.hgtags'])
283 283 tagnode = self.commit(message, user, date, extra=extra, match=m)
284 284
285 285 for name in names:
286 286 self.hook('tag', node=hex(node), tag=name, local=local)
287 287
288 288 return tagnode
289 289
290 290 def tag(self, names, node, message, local, user, date):
291 291 '''tag a revision with one or more symbolic names.
292 292
293 293 names is a list of strings or, when adding a single tag, names may be a
294 294 string.
295 295
296 296 if local is True, the tags are stored in a per-repository file.
297 297 otherwise, they are stored in the .hgtags file, and a new
298 298 changeset is committed with the change.
299 299
300 300 keyword arguments:
301 301
302 302 local: whether to store tags in non-version-controlled file
303 303 (default False)
304 304
305 305 message: commit message to use if committing
306 306
307 307 user: name of user to use if committing
308 308
309 309 date: date tuple to use if committing'''
310 310
311 311 for x in self.status()[:5]:
312 312 if '.hgtags' in x:
313 313 raise util.Abort(_('working copy of .hgtags is changed '
314 314 '(please commit .hgtags manually)'))
315 315
316 316 self.tags() # instantiate the cache
317 317 self._tag(names, node, message, local, user, date)
318 318
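A hedged usage sketch of the public entry point, assuming `repo` is an open localrepository and `node` a changeset node from it; `names` may equally be a list of strings:

    # local=False records the tag in .hgtags and commits the change;
    # local=True writes .hg/localtags and creates no changeset.
    # user=None / date=None fall through to commit()'s defaults.
    repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)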
319 319 def tags(self):
320 320 '''return a mapping of tag to node'''
321 321 if self._tags is None:
322 322 (self._tags, self._tagtypes) = self._findtags()
323 323
324 324 return self._tags
325 325
326 326 def _findtags(self):
327 327 '''Do the hard work of finding tags. Return a pair of dicts
328 328 (tags, tagtypes) where tags maps tag name to node, and tagtypes
329 329 maps tag name to a string like \'global\' or \'local\'.
330 330 Subclasses or extensions are free to add their own tags, but
331 331 should be aware that the returned dicts will be retained for the
332 332 duration of the localrepo object.'''
333 333
334 334 # XXX what tagtype should subclasses/extensions use? Currently
335 335 # mq and bookmarks add tags, but do not set the tagtype at all.
336 336 # Should each extension invent its own tag type? Should there
337 337 # be one tagtype for all such "virtual" tags? Or is the status
338 338 # quo fine?
339 339
340 340 alltags = {} # map tag name to (node, hist)
341 341 tagtypes = {}
342 342
343 343 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
344 344 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
345 345
346 346 # Build the return dicts. Have to re-encode tag names because
347 347 # the tags module always uses UTF-8 (in order not to lose info
348 348 # writing to the cache), but the rest of Mercurial wants them in
349 349 # local encoding.
350 350 tags = {}
351 351 for (name, (node, hist)) in alltags.iteritems():
352 352 if node != nullid:
353 353 tags[encoding.tolocal(name)] = node
354 354 tags['tip'] = self.changelog.tip()
355 355 tagtypes = dict([(encoding.tolocal(name), value)
356 356 for (name, value) in tagtypes.iteritems()])
357 357 return (tags, tagtypes)
358 358
359 359 def tagtype(self, tagname):
360 360 '''
361 361 return the type of the given tag. result can be:
362 362
363 363 'local' : a local tag
364 364 'global' : a global tag
365 365 None : tag does not exist
366 366 '''
367 367
368 368 self.tags()
369 369
370 370 return self._tagtypes.get(tagname)
371 371
372 372 def tagslist(self):
373 373 '''return a list of tags ordered by revision'''
374 374 l = []
375 375 for t, n in self.tags().iteritems():
376 376 try:
377 377 r = self.changelog.rev(n)
378 378 except:
379 379 r = -2 # sort to the beginning of the list if unknown
380 380 l.append((r, t, n))
381 381 return [(t, n) for r, t, n in sorted(l)]
382 382
383 383 def nodetags(self, node):
384 384 '''return the tags associated with a node'''
385 385 if not self.nodetagscache:
386 386 self.nodetagscache = {}
387 387 for t, n in self.tags().iteritems():
388 388 self.nodetagscache.setdefault(n, []).append(t)
389 389 for tags in self.nodetagscache.itervalues():
390 390 tags.sort()
391 391 return self.nodetagscache.get(node, [])
392 392
393 393 def _branchtags(self, partial, lrev):
394 394 # TODO: rename this function?
395 395 tiprev = len(self) - 1
396 396 if lrev != tiprev:
397 397 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
398 398 self._updatebranchcache(partial, ctxgen)
399 399 self._writebranchcache(partial, self.changelog.tip(), tiprev)
400 400
401 401 return partial
402 402
403 403 def updatebranchcache(self):
404 404 tip = self.changelog.tip()
405 405 if self._branchcache is not None and self._branchcachetip == tip:
406 406 return self._branchcache
407 407
408 408 oldtip = self._branchcachetip
409 409 self._branchcachetip = tip
410 410 if oldtip is None or oldtip not in self.changelog.nodemap:
411 411 partial, last, lrev = self._readbranchcache()
412 412 else:
413 413 lrev = self.changelog.rev(oldtip)
414 414 partial = self._branchcache
415 415
416 416 self._branchtags(partial, lrev)
417 417 # this private cache holds all heads (not just tips)
418 418 self._branchcache = partial
419 419
420 420 def branchmap(self):
421 421 '''returns a dictionary {branch: [branchheads]}'''
422 422 self.updatebranchcache()
423 423 return self._branchcache
424 424
425 425 def branchtags(self):
426 426 '''return a dict where branch names map to the tipmost head of
427 427 the branch; open heads take precedence over closed ones'''
428 428 bt = {}
429 429 for bn, heads in self.branchmap().iteritems():
430 430 tip = heads[-1]
431 431 for h in reversed(heads):
432 432 if 'close' not in self.changelog.read(h)[5]:
433 433 tip = h
434 434 break
435 435 bt[bn] = tip
436 436 return bt
437 437
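The selection rule in `branchtags` is easy to misread: the tipmost head wins only among open heads, with the tipmost head overall as fallback when every head of the branch is closed. The same rule over hypothetical data:

    branchmap = {'stable': ['old', 'closedtip']}   # ordered oldest to newest
    closedheads = set(['closedtip'])               # hypothetical closed head

    bt = {}
    for bn, heads in branchmap.iteritems():
        tip = heads[-1]                  # fallback: newest head, even if closed
        for h in reversed(heads):
            if h not in closedheads:     # newest open head takes precedence
                tip = h
                break
        bt[bn] = tip

    assert bt['stable'] == 'old'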
438 438
439 439 def _readbranchcache(self):
440 440 partial = {}
441 441 try:
442 442 f = self.opener("branchheads.cache")
443 443 lines = f.read().split('\n')
444 444 f.close()
445 445 except (IOError, OSError):
446 446 return {}, nullid, nullrev
447 447
448 448 try:
449 449 last, lrev = lines.pop(0).split(" ", 1)
450 450 last, lrev = bin(last), int(lrev)
451 451 if lrev >= len(self) or self[lrev].node() != last:
452 452 # invalidate the cache
453 453 raise ValueError('invalidating branch cache (tip differs)')
454 454 for l in lines:
455 455 if not l:
456 456 continue
457 457 node, label = l.split(" ", 1)
458 458 partial.setdefault(label.strip(), []).append(bin(node))
459 459 except KeyboardInterrupt:
460 460 raise
461 461 except Exception, inst:
462 462 if self.ui.debugflag:
463 463 self.ui.warn(str(inst), '\n')
464 464 partial, last, lrev = {}, nullid, nullrev
465 465 return partial, last, lrev
466 466
467 467 def _writebranchcache(self, branches, tip, tiprev):
468 468 try:
469 469 f = self.opener("branchheads.cache", "w", atomictemp=True)
470 470 f.write("%s %s\n" % (hex(tip), tiprev))
471 471 for label, nodes in branches.iteritems():
472 472 for node in nodes:
473 473 f.write("%s %s\n" % (hex(node), label))
474 474 f.rename()
475 475 except (IOError, OSError):
476 476 pass
477 477
478 478 def _updatebranchcache(self, partial, ctxgen):
479 479 # collect new branch entries
480 480 newbranches = {}
481 481 for c in ctxgen:
482 482 newbranches.setdefault(c.branch(), []).append(c.node())
483 483 # if older branchheads are reachable from new ones, they aren't
484 484 # really branchheads. Note checking parents is insufficient:
485 485 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
486 486 for branch, newnodes in newbranches.iteritems():
487 487 bheads = partial.setdefault(branch, [])
488 488 bheads.extend(newnodes)
489 489 if len(bheads) <= 1:
490 490 continue
491 491 # starting from tip means fewer passes over reachable
492 492 while newnodes:
493 493 latest = newnodes.pop()
494 494 if latest not in bheads:
495 495 continue
496 496 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
497 497 reachable = self.changelog.reachable(latest, minbhrev)
498 498 reachable.remove(latest)
499 499 bheads = [b for b in bheads if b not in reachable]
500 500 partial[branch] = bheads
501 501
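The `1 (branch a) -> 2 (branch b) -> 3 (branch a)` note above is the whole argument for full reachability: changeset 1 is not a parent of 3, yet it is an ancestor of 3 through the branch-b changeset, so a parent-only check would wrongly keep 1 as a head of branch a. The pruning step over a hypothetical ancestor map:

    ancestors = {3: set([1, 2]), 2: set([1]), 1: set()}   # toy graph 1 -> 2 -> 3

    bheads = [1, 3]                    # candidate heads of branch 'a'
    reachable = ancestors[3]           # nodes the newest candidate can reach
    bheads = [b for b in bheads if b not in reachable]
    assert bheads == [3]               # 1 is reachable from 3, so not a head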
502 502 def lookup(self, key):
503 503 if isinstance(key, int):
504 504 return self.changelog.node(key)
505 505 elif key == '.':
506 506 return self.dirstate.parents()[0]
507 507 elif key == 'null':
508 508 return nullid
509 509 elif key == 'tip':
510 510 return self.changelog.tip()
511 511 n = self.changelog._match(key)
512 512 if n:
513 513 return n
514 514 if key in self.tags():
515 515 return self.tags()[key]
516 516 if key in self.branchtags():
517 517 return self.branchtags()[key]
518 518 n = self.changelog._partialmatch(key)
519 519 if n:
520 520 return n
521 521
522 522 # can't find key, check if it might have come from damaged dirstate
523 523 if key in self.dirstate.parents():
524 524 raise error.Abort(_("working directory has unknown parent '%s'!")
525 525 % short(key))
526 526 try:
527 527 if len(key) == 20:
528 528 key = hex(key)
529 529 except:
530 530 pass
531 531 raise error.RepoLookupError(_("unknown revision '%s'") % key)
532 532
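Restating the resolution order of `lookup`, since it determines which namespace shadows which when names collide (a sketch, assuming `repo` is an open localrepository):

    # lookup(key) tries, in order:
    #   1. int                  -> changelog revision number
    #   2. '.' / 'null' / 'tip' -> dirstate parent / nullid / changelog tip
    #   3. exact nodeid         -> changelog._match
    #   4. tag name             -> self.tags()
    #   5. branch name          -> self.branchtags()
    #   6. unique hex prefix    -> changelog._partialmatch
    tip = repo.lookup('tip')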
533 533 def lookupbranch(self, key, remote=None):
534 534 repo = remote or self
535 535 if key in repo.branchmap():
536 536 return key
537 537
538 538 repo = (remote and remote.local()) and remote or self
539 539 return repo[key].branch()
540 540
541 541 def local(self):
542 542 return True
543 543
544 544 def join(self, f):
545 545 return os.path.join(self.path, f)
546 546
547 547 def wjoin(self, f):
548 548 return os.path.join(self.root, f)
549 549
550 550 def file(self, f):
551 551 if f[0] == '/':
552 552 f = f[1:]
553 553 return filelog.filelog(self.sopener, f)
554 554
555 555 def changectx(self, changeid):
556 556 return self[changeid]
557 557
558 558 def parents(self, changeid=None):
559 559 '''get list of changectxs for parents of changeid'''
560 560 return self[changeid].parents()
561 561
562 562 def filectx(self, path, changeid=None, fileid=None):
563 563 """changeid can be a changeset revision, node, or tag.
564 564 fileid can be a file revision or node."""
565 565 return context.filectx(self, path, changeid, fileid)
566 566
567 567 def getcwd(self):
568 568 return self.dirstate.getcwd()
569 569
570 570 def pathto(self, f, cwd=None):
571 571 return self.dirstate.pathto(f, cwd)
572 572
573 573 def wfile(self, f, mode='r'):
574 574 return self.wopener(f, mode)
575 575
576 576 def _link(self, f):
577 577 return os.path.islink(self.wjoin(f))
578 578
579 579 def _loadfilter(self, filter):
580 580 if filter not in self.filterpats:
581 581 l = []
582 582 for pat, cmd in self.ui.configitems(filter):
583 583 if cmd == '!':
584 584 continue
585 585 mf = matchmod.match(self.root, '', [pat])
586 586 fn = None
587 587 params = cmd
588 588 for name, filterfn in self._datafilters.iteritems():
589 589 if cmd.startswith(name):
590 590 fn = filterfn
591 591 params = cmd[len(name):].lstrip()
592 592 break
593 593 if not fn:
594 594 fn = lambda s, c, **kwargs: util.filter(s, c)
595 595 # Wrap old filters not supporting keyword arguments
596 596 if not inspect.getargspec(fn)[2]:
597 597 oldfn = fn
598 598 fn = lambda s, c, **kwargs: oldfn(s, c)
599 599 l.append((mf, fn, params))
600 600 self.filterpats[filter] = l
601 601 return self.filterpats[filter]
602 602
603 603 def _filter(self, filterpats, filename, data):
604 604 for mf, fn, cmd in filterpats:
605 605 if mf(filename):
606 606 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
607 607 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
608 608 break
609 609
610 610 return data
611 611
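`_loadfilter` turns each `pattern = command` pair from the named config section into a `(matcher, function, params)` triple, and `_filter` applies the first triple whose matcher accepts the filename. The pipeline in miniature, with a hypothetical CRLF-normalizing filter:

    filterpats = [
        (lambda fname: fname.endswith('.txt'),                    # matcher
         lambda data, cmd, **kwargs: data.replace('\r\n', '\n'),  # filter fn
         'hypothetical-crlf-filter'),                             # params
    ]

    def applyfilters(filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                return fn(data, cmd)     # first matching pattern wins
        return data

    assert applyfilters(filterpats, 'a.txt', 'one\r\ntwo\r\n') == 'one\ntwo\n'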
612 612 @propertycache
613 613 def _encodefilterpats(self):
614 614 return self._loadfilter('encode')
615 615
616 616 @propertycache
617 617 def _decodefilterpats(self):
618 618 return self._loadfilter('decode')
619 619
620 620 def adddatafilter(self, name, filter):
621 621 self._datafilters[name] = filter
622 622
623 623 def wread(self, filename):
624 624 if self._link(filename):
625 625 data = os.readlink(self.wjoin(filename))
626 626 else:
627 627 data = self.wopener(filename, 'r').read()
628 628 return self._filter(self._encodefilterpats, filename, data)
629 629
630 630 def wwrite(self, filename, data, flags):
631 631 data = self._filter(self._decodefilterpats, filename, data)
632 632 try:
633 633 os.unlink(self.wjoin(filename))
634 634 except OSError:
635 635 pass
636 636 if 'l' in flags:
637 637 self.wopener.symlink(data, filename)
638 638 else:
639 639 self.wopener(filename, 'w').write(data)
640 640 if 'x' in flags:
641 641 util.set_flags(self.wjoin(filename), False, True)
642 642
643 643 def wwritedata(self, filename, data):
644 644 return self._filter(self._decodefilterpats, filename, data)
645 645
646 646 def transaction(self, desc):
647 647 tr = self._transref and self._transref() or None
648 648 if tr and tr.running():
649 649 return tr.nest()
650 650
651 651 # abort here if the journal already exists
652 652 if os.path.exists(self.sjoin("journal")):
653 653 raise error.RepoError(
654 654 _("abandoned transaction found - run hg recover"))
655 655
656 656 # save dirstate for rollback
657 657 try:
658 658 ds = self.opener("dirstate").read()
659 659 except IOError:
660 660 ds = ""
661 661 self.opener("journal.dirstate", "w").write(ds)
662 662 self.opener("journal.branch", "w").write(self.dirstate.branch())
663 663 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
664 664
665 665 renames = [(self.sjoin("journal"), self.sjoin("undo")),
666 666 (self.join("journal.dirstate"), self.join("undo.dirstate")),
667 667 (self.join("journal.branch"), self.join("undo.branch")),
668 668 (self.join("journal.desc"), self.join("undo.desc"))]
669 669 tr = transaction.transaction(self.ui.warn, self.sopener,
670 670 self.sjoin("journal"),
671 671 aftertrans(renames),
672 672 self.store.createmode)
673 673 self._transref = weakref.ref(tr)
674 674 return tr
675 675
676 676 def recover(self):
677 677 lock = self.lock()
678 678 try:
679 679 if os.path.exists(self.sjoin("journal")):
680 680 self.ui.status(_("rolling back interrupted transaction\n"))
681 681 transaction.rollback(self.sopener, self.sjoin("journal"),
682 682 self.ui.warn)
683 683 self.invalidate()
684 684 return True
685 685 else:
686 686 self.ui.warn(_("no interrupted transaction available\n"))
687 687 return False
688 688 finally:
689 689 lock.release()
690 690
691 691 def rollback(self, dryrun=False):
692 692 wlock = lock = None
693 693 try:
694 694 wlock = self.wlock()
695 695 lock = self.lock()
696 696 if os.path.exists(self.sjoin("undo")):
697 697 try:
698 698 args = self.opener("undo.desc", "r").read().splitlines()
699 699 if len(args) >= 3 and self.ui.verbose:
700 700 desc = _("rolling back to revision %s"
701 701 " (undo %s: %s)\n") % (
702 702 int(args[0]) - 1, args[1], args[2])
703 703 elif len(args) >= 2:
704 704 desc = _("rolling back to revision %s (undo %s)\n") % (
705 705 int(args[0]) - 1, args[1])
706 706 except IOError:
707 707 desc = _("rolling back unknown transaction\n")
708 708 self.ui.status(desc)
709 709 if dryrun:
710 710 return
711 711 transaction.rollback(self.sopener, self.sjoin("undo"),
712 712 self.ui.warn)
713 713 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
714 714 try:
715 715 branch = self.opener("undo.branch").read()
716 716 self.dirstate.setbranch(branch)
717 717 except IOError:
718 718 self.ui.warn(_("Named branch could not be reset, "
719 719 "current branch still is: %s\n")
720 720 % encoding.tolocal(self.dirstate.branch()))
721 721 self.invalidate()
722 722 self.dirstate.invalidate()
723 723 self.destroyed()
724 724 else:
725 725 self.ui.warn(_("no rollback information available\n"))
726 726 return 1
727 727 finally:
728 728 release(lock, wlock)
729 729
730 730 def invalidatecaches(self):
731 731 self._tags = None
732 732 self._tagtypes = None
733 733 self.nodetagscache = None
734 734 self._branchcache = None # in UTF-8
735 735 self._branchcachetip = None
736 736
737 737 def invalidate(self):
738 738 for a in "changelog manifest".split():
739 739 if a in self.__dict__:
740 740 delattr(self, a)
741 741 self.invalidatecaches()
742 742
743 743 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
744 744 try:
745 745 l = lock.lock(lockname, 0, releasefn, desc=desc)
746 746 except error.LockHeld, inst:
747 747 if not wait:
748 748 raise
749 749 self.ui.warn(_("waiting for lock on %s held by %r\n") %
750 750 (desc, inst.locker))
751 751 # default to 600 seconds timeout
752 752 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
753 753 releasefn, desc=desc)
754 754 if acquirefn:
755 755 acquirefn()
756 756 return l
757 757
758 758 def lock(self, wait=True):
759 759 '''Lock the repository store (.hg/store) and return a weak reference
760 760 to the lock. Use this before modifying the store (e.g. committing or
761 761 stripping). If you are opening a transaction, get a lock as well.'''
762 762 l = self._lockref and self._lockref()
763 763 if l is not None and l.held:
764 764 l.lock()
765 765 return l
766 766
767 767 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
768 768 _('repository %s') % self.origroot)
769 769 self._lockref = weakref.ref(l)
770 770 return l
771 771
772 772 def wlock(self, wait=True):
773 773 '''Lock the non-store parts of the repository (everything under
774 774 .hg except .hg/store) and return a weak reference to the lock.
775 775 Use this before modifying files in .hg.'''
776 776 l = self._wlockref and self._wlockref()
777 777 if l is not None and l.held:
778 778 l.lock()
779 779 return l
780 780
781 781 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
782 782 self.dirstate.invalidate, _('working directory of %s') %
783 783 self.origroot)
784 784 self._wlockref = weakref.ref(l)
785 785 return l
786 786
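Both docstrings imply the same calling discipline, which `rollback` below follows: take the working-directory lock before the store lock when both are needed, and release in a `finally`. A usage sketch (assuming `repo` is an open localrepository; `release` is the helper imported from `lock` at the top of this file):

    wlock = lock = None
    try:
        wlock = repo.wlock()     # protects .hg outside the store
        lock = repo.lock()       # protects .hg/store
        # ... modify the repository ...
    finally:
        release(lock, wlock)     # safe on None, releases in this order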
787 787 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
788 788 """
789 789 commit an individual file as part of a larger transaction
790 790 """
791 791
792 792 fname = fctx.path()
793 793 text = fctx.data()
794 794 flog = self.file(fname)
795 795 fparent1 = manifest1.get(fname, nullid)
796 796 fparent2 = fparent2o = manifest2.get(fname, nullid)
797 797
798 798 meta = {}
799 799 copy = fctx.renamed()
800 800 if copy and copy[0] != fname:
801 801 # Mark the new revision of this file as a copy of another
802 802 # file. This copy data will effectively act as a parent
803 803 # of this new revision. If this is a merge, the first
804 804 # parent will be the nullid (meaning "look up the copy data")
805 805 # and the second one will be the other parent. For example:
806 806 #
807 807 # 0 --- 1 --- 3 rev1 changes file foo
808 808 # \ / rev2 renames foo to bar and changes it
809 809 # \- 2 -/ rev3 should have bar with all changes and
810 810 # should record that bar descends from
811 811 # bar in rev2 and foo in rev1
812 812 #
813 813 # this allows this merge to succeed:
814 814 #
815 815 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
816 816 # \ / merging rev3 and rev4 should use bar@rev2
817 817 # \- 2 --- 4 as the merge base
818 818 #
819 819
820 820 cfname = copy[0]
821 821 crev = manifest1.get(cfname)
822 822 newfparent = fparent2
823 823
824 824 if manifest2: # branch merge
825 825 if fparent2 == nullid or crev is None: # copied on remote side
826 826 if cfname in manifest2:
827 827 crev = manifest2[cfname]
828 828 newfparent = fparent1
829 829
830 830 # find source in nearest ancestor if we've lost track
831 831 if not crev:
832 832 self.ui.debug(" %s: searching for copy revision for %s\n" %
833 833 (fname, cfname))
834 834 for ancestor in self[None].ancestors():
835 835 if cfname in ancestor:
836 836 crev = ancestor[cfname].filenode()
837 837 break
838 838
839 839 if crev:
840 840 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
841 841 meta["copy"] = cfname
842 842 meta["copyrev"] = hex(crev)
843 843 fparent1, fparent2 = nullid, newfparent
844 844 else:
845 845 self.ui.warn(_("warning: can't find ancestor for '%s' "
846 846 "copied from '%s'!\n") % (fname, cfname))
847 847
848 848 elif fparent2 != nullid:
849 849 # is one parent an ancestor of the other?
850 850 fparentancestor = flog.ancestor(fparent1, fparent2)
851 851 if fparentancestor == fparent1:
852 852 fparent1, fparent2 = fparent2, nullid
853 853 elif fparentancestor == fparent2:
854 854 fparent2 = nullid
855 855
856 856 # is the file changed?
857 857 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
858 858 changelist.append(fname)
859 859 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
860 860
861 861 # are just the flags changed during merge?
862 862 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
863 863 changelist.append(fname)
864 864
865 865 return fparent1
866 866
867 867 def commit(self, text="", user=None, date=None, match=None, force=False,
868 868 editor=False, extra={}):
869 869 """Add a new revision to current repository.
870 870
871 871 Revision information is gathered from the working directory,
872 872 match can be used to filter the committed files. If editor is
873 873 supplied, it is called to get a commit message.
874 874 """
875 875
876 876 def fail(f, msg):
877 877 raise util.Abort('%s: %s' % (f, msg))
878 878
879 879 if not match:
880 880 match = matchmod.always(self.root, '')
881 881
882 882 if not force:
883 883 vdirs = []
884 884 match.dir = vdirs.append
885 885 match.bad = fail
886 886
887 887 wlock = self.wlock()
888 888 try:
889 889 wctx = self[None]
890 890 merge = len(wctx.parents()) > 1
891 891
892 892 if (not force and merge and match and
893 893 (match.files() or match.anypats())):
894 894 raise util.Abort(_('cannot partially commit a merge '
895 895 '(do not specify files or patterns)'))
896 896
897 897 changes = self.status(match=match, clean=force)
898 898 if force:
899 899 changes[0].extend(changes[6]) # mq may commit unchanged files
900 900
901 901 # check subrepos
902 902 subs = []
903 903 removedsubs = set()
904 904 for p in wctx.parents():
905 905 removedsubs.update(s for s in p.substate if match(s))
906 906 for s in wctx.substate:
907 907 removedsubs.discard(s)
908 908 if match(s) and wctx.sub(s).dirty():
909 909 subs.append(s)
910 910 if (subs or removedsubs):
911 911 if (not match('.hgsub') and
912 912 '.hgsub' in (wctx.modified() + wctx.added())):
913 913 raise util.Abort(_("can't commit subrepos without .hgsub"))
914 914 if '.hgsubstate' not in changes[0]:
915 915 changes[0].insert(0, '.hgsubstate')
916 916
917 917 # make sure all explicit patterns are matched
918 918 if not force and match.files():
919 919 matched = set(changes[0] + changes[1] + changes[2])
920 920
921 921 for f in match.files():
922 922 if f == '.' or f in matched or f in wctx.substate:
923 923 continue
924 924 if f in changes[3]: # missing
925 925 fail(f, _('file not found!'))
926 926 if f in vdirs: # visited directory
927 927 d = f + '/'
928 928 for mf in matched:
929 929 if mf.startswith(d):
930 930 break
931 931 else:
932 932 fail(f, _("no match under directory!"))
933 933 elif f not in self.dirstate:
934 934 fail(f, _("file not tracked!"))
935 935
936 936 if (not force and not extra.get("close") and not merge
937 937 and not (changes[0] or changes[1] or changes[2])
938 938 and wctx.branch() == wctx.p1().branch()):
939 939 return None
940 940
941 941 ms = mergemod.mergestate(self)
942 942 for f in changes[0]:
943 943 if f in ms and ms[f] == 'u':
944 944 raise util.Abort(_("unresolved merge conflicts "
945 945 "(see hg resolve)"))
946 946
947 947 cctx = context.workingctx(self, text, user, date, extra, changes)
948 948 if editor:
949 949 cctx._text = editor(self, cctx, subs)
950 950 edited = (text != cctx._text)
951 951
952 952 # commit subs
953 953 if subs or removedsubs:
954 954 state = wctx.substate.copy()
955 955 for s in sorted(subs):
956 956 sub = wctx.sub(s)
957 957 self.ui.status(_('committing subrepository %s\n') %
958 958 subrepo.subrelpath(sub))
959 959 sr = sub.commit(cctx._text, user, date)
960 960 state[s] = (state[s][0], sr)
961 961 subrepo.writestate(self, state)
962 962
963 963 # Save commit message in case this transaction gets rolled back
964 964 # (e.g. by a pretxncommit hook). Leave the content alone on
965 965 # the assumption that the user will use the same editor again.
966 966 msgfile = self.opener('last-message.txt', 'wb')
967 967 msgfile.write(cctx._text)
968 968 msgfile.close()
969 969
970 970 p1, p2 = self.dirstate.parents()
971 971 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
972 972 try:
973 973 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
974 974 ret = self.commitctx(cctx, True)
975 975 except:
976 976 if edited:
977 977 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
978 978 self.ui.write(
979 979 _('note: commit message saved in %s\n') % msgfn)
980 980 raise
981 981
982 982 # update dirstate and mergestate
983 983 for f in changes[0] + changes[1]:
984 984 self.dirstate.normal(f)
985 985 for f in changes[2]:
986 986 self.dirstate.forget(f)
987 987 self.dirstate.setparents(ret)
988 988 ms.reset()
989 989 finally:
990 990 wlock.release()
991 991
992 992 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
993 993 return ret
994 994
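A hedged usage sketch: commit only a subset of the working directory through an explicit matcher (assuming `repo` is an open localrepository; `matchmod` as imported at the top of this file):

    m = matchmod.match(repo.root, '', ['path:docs'])   # everything under docs/
    node = repo.commit('update docs', user='Jane <jane@example.com>', match=m)
    if node is None:
        pass    # nothing changed on this branch, so no commit was created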
995 995 def commitctx(self, ctx, error=False):
996 996 """Add a new revision to current repository.
997 997 Revision information is passed via the context argument.
998 998 """
999 999
1000 1000 tr = lock = None
1001 1001 removed = list(ctx.removed())
1002 1002 p1, p2 = ctx.p1(), ctx.p2()
1003 1003 m1 = p1.manifest().copy()
1004 1004 m2 = p2.manifest()
1005 1005 user = ctx.user()
1006 1006
1007 1007 lock = self.lock()
1008 1008 try:
1009 1009 tr = self.transaction("commit")
1010 1010 trp = weakref.proxy(tr)
1011 1011
1012 1012 # check in files
1013 1013 new = {}
1014 1014 changed = []
1015 1015 linkrev = len(self)
1016 1016 for f in sorted(ctx.modified() + ctx.added()):
1017 1017 self.ui.note(f + "\n")
1018 1018 try:
1019 1019 fctx = ctx[f]
1020 1020 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1021 1021 changed)
1022 1022 m1.set(f, fctx.flags())
1023 1023 except OSError, inst:
1024 1024 self.ui.warn(_("trouble committing %s!\n") % f)
1025 1025 raise
1026 1026 except IOError, inst:
1027 1027 errcode = getattr(inst, 'errno', errno.ENOENT)
1028 1028 if error or errcode and errcode != errno.ENOENT:
1029 1029 self.ui.warn(_("trouble committing %s!\n") % f)
1030 1030 raise
1031 1031 else:
1032 1032 removed.append(f)
1033 1033
1034 1034 # update manifest
1035 1035 m1.update(new)
1036 1036 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1037 1037 drop = [f for f in removed if f in m1]
1038 1038 for f in drop:
1039 1039 del m1[f]
1040 1040 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1041 1041 p2.manifestnode(), (new, drop))
1042 1042
1043 1043 # update changelog
1044 1044 self.changelog.delayupdate()
1045 1045 n = self.changelog.add(mn, changed + removed, ctx.description(),
1046 1046 trp, p1.node(), p2.node(),
1047 1047 user, ctx.date(), ctx.extra().copy())
1048 1048 p = lambda: self.changelog.writepending() and self.root or ""
1049 1049 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1050 1050 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1051 1051 parent2=xp2, pending=p)
1052 1052 self.changelog.finalize(trp)
1053 1053 tr.close()
1054 1054
1055 1055 if self._branchcache:
1056 1056 self.updatebranchcache()
1057 1057 return n
1058 1058 finally:
1059 1059 if tr:
1060 1060 tr.release()
1061 1061 lock.release()
1062 1062
1063 1063 def destroyed(self):
1064 1064 '''Inform the repository that nodes have been destroyed.
1065 1065 Intended for use by strip and rollback, so there's a common
1066 1066 place for anything that has to be done after destroying history.'''
1067 1067 # XXX it might be nice if we could take the list of destroyed
1068 1068 # nodes, but I don't see an easy way for rollback() to do that
1069 1069
1070 1070 # Ensure the persistent tag cache is updated. Doing it now
1071 1071 # means that the tag cache only has to worry about destroyed
1072 1072 # heads immediately after a strip/rollback. That in turn
1073 1073 # guarantees that "cachetip == currenttip" (comparing both rev
1074 1074 # and node) always means no nodes have been added or destroyed.
1075 1075
1076 1076 # XXX this is suboptimal when qrefresh'ing: we strip the current
1077 1077 # head, refresh the tag cache, then immediately add a new head.
1078 1078 # But I think doing it this way is necessary for the "instant
1079 1079 # tag cache retrieval" case to work.
1080 1080 self.invalidatecaches()
1081 1081
1082 1082 def walk(self, match, node=None):
1083 1083 '''
1084 1084 walk recursively through the directory tree or a given
1085 1085 changeset, finding all files matched by the match
1086 1086 function
1087 1087 '''
1088 1088 return self[node].walk(match)
1089 1089
1090 1090 def status(self, node1='.', node2=None, match=None,
1091 1091 ignored=False, clean=False, unknown=False,
1092 1092 listsubrepos=False):
1093 1093 """return status of files between two nodes or node and working directory
1094 1094
1095 1095 If node1 is None, use the first dirstate parent instead.
1096 1096 If node2 is None, compare node1 with working directory.
1097 1097 """
1098 1098
1099 1099 def mfmatches(ctx):
1100 1100 mf = ctx.manifest().copy()
1101 1101 for fn in mf.keys():
1102 1102 if not match(fn):
1103 1103 del mf[fn]
1104 1104 return mf
1105 1105
1106 1106 if isinstance(node1, context.changectx):
1107 1107 ctx1 = node1
1108 1108 else:
1109 1109 ctx1 = self[node1]
1110 1110 if isinstance(node2, context.changectx):
1111 1111 ctx2 = node2
1112 1112 else:
1113 1113 ctx2 = self[node2]
1114 1114
1115 1115 working = ctx2.rev() is None
1116 1116 parentworking = working and ctx1 == self['.']
1117 1117 match = match or matchmod.always(self.root, self.getcwd())
1118 1118 listignored, listclean, listunknown = ignored, clean, unknown
1119 1119
1120 1120 # load earliest manifest first for caching reasons
1121 1121 if not working and ctx2.rev() < ctx1.rev():
1122 1122 ctx2.manifest()
1123 1123
1124 1124 if not parentworking:
1125 1125 def bad(f, msg):
1126 1126 if f not in ctx1:
1127 1127 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1128 1128 match.bad = bad
1129 1129
1130 1130 if working: # we need to scan the working dir
1131 1131 subrepos = []
1132 1132 if '.hgsub' in self.dirstate:
1133 1133 subrepos = ctx1.substate.keys()
1134 1134 s = self.dirstate.status(match, subrepos, listignored,
1135 1135 listclean, listunknown)
1136 1136 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1137 1137
1138 1138 # check for any possibly clean files
1139 1139 if parentworking and cmp:
1140 1140 fixup = []
1141 1141 # do a full compare of any files that might have changed
1142 1142 for f in sorted(cmp):
1143 1143 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1144 1144 or ctx1[f].cmp(ctx2[f])):
1145 1145 modified.append(f)
1146 1146 else:
1147 1147 fixup.append(f)
1148 1148
1149 1149 # update dirstate for files that are actually clean
1150 1150 if fixup:
1151 1151 if listclean:
1152 1152 clean += fixup
1153 1153
1154 1154 try:
1155 1155 # updating the dirstate is optional
1156 1156 # so we don't wait on the lock
1157 1157 wlock = self.wlock(False)
1158 1158 try:
1159 1159 for f in fixup:
1160 1160 self.dirstate.normal(f)
1161 1161 finally:
1162 1162 wlock.release()
1163 1163 except error.LockError:
1164 1164 pass
1165 1165
1166 1166 if not parentworking:
1167 1167 mf1 = mfmatches(ctx1)
1168 1168 if working:
1169 1169 # we are comparing working dir against non-parent
1170 1170 # generate a pseudo-manifest for the working dir
1171 1171 mf2 = mfmatches(self['.'])
1172 1172 for f in cmp + modified + added:
1173 1173 mf2[f] = None
1174 1174 mf2.set(f, ctx2.flags(f))
1175 1175 for f in removed:
1176 1176 if f in mf2:
1177 1177 del mf2[f]
1178 1178 else:
1179 1179 # we are comparing two revisions
1180 1180 deleted, unknown, ignored = [], [], []
1181 1181 mf2 = mfmatches(ctx2)
1182 1182
1183 1183 modified, added, clean = [], [], []
1184 1184 for fn in mf2:
1185 1185 if fn in mf1:
1186 1186 if (mf1.flags(fn) != mf2.flags(fn) or
1187 1187 (mf1[fn] != mf2[fn] and
1188 1188 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1189 1189 modified.append(fn)
1190 1190 elif listclean:
1191 1191 clean.append(fn)
1192 1192 del mf1[fn]
1193 1193 else:
1194 1194 added.append(fn)
1195 1195 removed = mf1.keys()
1196 1196
1197 1197 r = modified, added, removed, deleted, unknown, ignored, clean
1198 1198
1199 1199 if listsubrepos:
1200 1200 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1201 1201 if working:
1202 1202 rev2 = None
1203 1203 else:
1204 1204 rev2 = ctx2.substate[subpath][1]
1205 1205 try:
1206 1206 submatch = matchmod.narrowmatcher(subpath, match)
1207 1207 s = sub.status(rev2, match=submatch, ignored=listignored,
1208 1208 clean=listclean, unknown=listunknown,
1209 1209 listsubrepos=True)
1210 1210 for rfiles, sfiles in zip(r, s):
1211 1211 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1212 1212 except error.LookupError:
1213 1213 self.ui.status(_("skipping missing subrepository: %s\n")
1214 1214 % subpath)
1215 1215
1216 1216 [l.sort() for l in r]
1217 1217 return r
1218 1218
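Callers unpack the seven lists positionally, as `commit` above does with `changes[0]`..`changes[6]`; the ignored, clean, and unknown lists stay empty unless explicitly requested. A sketch (assuming `repo` is an open localrepository):

    st = repo.status(ignored=True, clean=True, unknown=True)
    modified, added, removed, deleted, unknown, ignored, clean = st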
1219 1219 def heads(self, start=None):
1220 1220 heads = self.changelog.heads(start)
1221 1221 # sort the output in rev descending order
1222 1222 return sorted(heads, key=self.changelog.rev, reverse=True)
1223 1223
1224 1224 def branchheads(self, branch=None, start=None, closed=False):
1225 1225 '''return a (possibly filtered) list of heads for the given branch
1226 1226
1227 1227 Heads are returned in topological order, from newest to oldest.
1228 1228 If branch is None, use the dirstate branch.
1229 1229 If start is not None, return only heads reachable from start.
1230 1230 If closed is True, return heads that are marked as closed as well.
1231 1231 '''
1232 1232 if branch is None:
1233 1233 branch = self[None].branch()
1234 1234 branches = self.branchmap()
1235 1235 if branch not in branches:
1236 1236 return []
1237 1237 # the cache returns heads ordered lowest to highest
1238 1238 bheads = list(reversed(branches[branch]))
1239 1239 if start is not None:
1240 1240 # filter out the heads that cannot be reached from startrev
1241 1241 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1242 1242 bheads = [h for h in bheads if h in fbheads]
1243 1243 if not closed:
1244 1244 bheads = [h for h in bheads if
1245 1245 ('close' not in self.changelog.read(h)[5])]
1246 1246 return bheads
1247 1247
1248 1248 def branches(self, nodes):
1249 1249 if not nodes:
1250 1250 nodes = [self.changelog.tip()]
1251 1251 b = []
1252 1252 for n in nodes:
1253 1253 t = n
1254 1254 while 1:
1255 1255 p = self.changelog.parents(n)
1256 1256 if p[1] != nullid or p[0] == nullid:
1257 1257 b.append((t, n, p[0], p[1]))
1258 1258 break
1259 1259 n = p[0]
1260 1260 return b
1261 1261
1262 1262 def between(self, pairs):
1263 1263 r = []
1264 1264
1265 1265 for top, bottom in pairs:
1266 1266 n, l, i = top, [], 0
1267 1267 f = 1
1268 1268
1269 1269 while n != bottom and n != nullid:
1270 1270 p = self.changelog.parents(n)[0]
1271 1271 if i == f:
1272 1272 l.append(n)
1273 1273 f = f * 2
1274 1274 n = p
1275 1275 i += 1
1276 1276
1277 1277 r.append(l)
1278 1278
1279 1279 return r
1280 1280
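`between` samples each first-parent chain at exponentially growing distances (1, 2, 4, ... steps below `top`), a spacing that lets wire-protocol discovery search long chains cheaply. The inner loop over a toy first-parent map:

    parent = {'e': 'd', 'd': 'c', 'c': 'b', 'b': 'a', 'a': None}   # toy chain

    def between_one(top, bottom):
        n, l, i, f = top, [], 0, 1
        while n != bottom and n is not None:
            p = parent[n]
            if i == f:          # record nodes 1, 2, 4, ... steps from top
                l.append(n)
                f *= 2
            n = p
            i += 1
        return l

    assert between_one('e', 'a') == ['d', 'c']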
1281 1281 def pull(self, remote, heads=None, force=False):
1282 1282 lock = self.lock()
1283 1283 try:
1284 1284 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1285 1285 force=force)
1286 1286 common, fetch, rheads = tmp
1287 1287 if not fetch:
1288 1288 self.ui.status(_("no changes found\n"))
1289 1289 return 0
1290 1290
1291 1291 if heads is None and fetch == [nullid]:
1292 1292 self.ui.status(_("requesting all changes\n"))
1293 1293 elif heads is None and remote.capable('changegroupsubset'):
1294 1294 # issue1320, avoid a race if remote changed after discovery
1295 1295 heads = rheads
1296 1296
1297 1297 if heads is None:
1298 1298 cg = remote.changegroup(fetch, 'pull')
1299 1299 else:
1300 1300 if not remote.capable('changegroupsubset'):
1301 1301 raise util.Abort(_("partial pull cannot be done because "
1302 1302 "other repository doesn't support "
1303 1303 "changegroupsubset."))
1304 1304 cg = remote.changegroupsubset(fetch, heads, 'pull')
1305 1305 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1306 1306 finally:
1307 1307 lock.release()
1308 1308
1309 1309 def push(self, remote, force=False, revs=None, newbranch=False):
1310 1310 '''Push outgoing changesets (limited by revs) from the current
1311 1311 repository to remote. Return an integer:
1312 1312 - 0 means HTTP error *or* nothing to push
1313 1313 - 1 means we pushed and remote head count is unchanged *or*
1314 1314 we have outgoing changesets but refused to push
1315 1315 - other values as described by addchangegroup()
1316 1316 '''
1317 1317 # there are two ways to push to remote repo:
1318 1318 #
1319 1319 # addchangegroup assumes local user can lock remote
1320 1320 # repo (local filesystem, old ssh servers).
1321 1321 #
1322 1322 # unbundle assumes local user cannot lock remote repo (new ssh
1323 1323 # servers, http servers).
1324 1324
1325 1325 lock = None
1326 1326 unbundle = remote.capable('unbundle')
1327 1327 if not unbundle:
1328 1328 lock = remote.lock()
1329 1329 try:
1330 1330 ret = discovery.prepush(self, remote, force, revs, newbranch)
1331 1331 if ret[0] is None:
1332 1332 # and here we return 0 for "nothing to push" or 1 for
1333 1333 # "something to push but I refuse"
1334 1334 return ret[1]
1335 1335
1336 1336 cg, remote_heads = ret
1337 1337 if unbundle:
1338 1338 # local repo finds heads on server, finds out what revs it must
1339 1339 # push. once revs transferred, if server finds it has
1340 1340 # different heads (someone else won commit/push race), server
1341 1341 # aborts.
1342 1342 if force:
1343 1343 remote_heads = ['force']
1344 1344 # ssh: return remote's addchangegroup()
1345 1345 # http: return remote's addchangegroup() or 0 for error
1346 1346 return remote.unbundle(cg, remote_heads, 'push')
1347 1347 else:
1348 1348 # we return an integer indicating remote head count change
1349 1349 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1350 1350 finally:
1351 1351 if lock is not None:
1352 1352 lock.release()
1353 1353
1354 1354 def changegroupinfo(self, nodes, source):
1355 1355 if self.ui.verbose or source == 'bundle':
1356 1356 self.ui.status(_("%d changesets found\n") % len(nodes))
1357 1357 if self.ui.debugflag:
1358 1358 self.ui.debug("list of changesets:\n")
1359 1359 for node in nodes:
1360 1360 self.ui.debug("%s\n" % hex(node))
1361 1361
1362 1362 def changegroupsubset(self, bases, heads, source, extranodes=None):
1363 1363 """Compute a changegroup consisting of all the nodes that are
1364 1364 descendants of any of the bases and ancestors of any of the heads.
1365 1365 Return a chunkbuffer object whose read() method will return
1366 1366 successive changegroup chunks.
1367 1367
1368 1368 It is fairly complex as determining which filenodes and which
1369 1369 manifest nodes need to be included for the changeset to be complete
1370 1370 is non-trivial.
1371 1371
1372 1372 Another wrinkle is doing the reverse, figuring out which changeset in
1373 1373 the changegroup a particular filenode or manifestnode belongs to.
1374 1374
1375 1375 The caller can specify some nodes that must be included in the
1376 1376 changegroup using the extranodes argument. It should be a dict
1377 1377 where the keys are the filenames (or 1 for the manifest), and the
1378 1378 values are lists of (node, linknode) tuples, where node is a wanted
1379 1379 node and linknode is the changelog node that should be transmitted as
1380 1380 the linkrev.
1381 1381 """
1382 1382
1383 1383 # Set up some initial variables
1384 1384 # Make it easy to refer to self.changelog
1385 1385 cl = self.changelog
1386 1386 # Compute the list of changesets in this changegroup.
1387 1387 # Some bases may turn out to be superfluous, and some heads may be
1388 1388 # too. nodesbetween will return the minimal set of bases and heads
1389 1389 # necessary to re-create the changegroup.
1390 1390 if not bases:
1391 1391 bases = [nullid]
1392 1392 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1393 1393
1394 1394 if extranodes is None:
1395 1395 # can we go through the fast path ?
1396 1396 heads.sort()
1397 1397 allheads = self.heads()
1398 1398 allheads.sort()
1399 1399 if heads == allheads:
1400 1400 return self._changegroup(msng_cl_lst, source)
1401 1401
1402 1402 # slow path
1403 1403 self.hook('preoutgoing', throw=True, source=source)
1404 1404
1405 1405 self.changegroupinfo(msng_cl_lst, source)
1406 1406
1407 1407 # We assume that all ancestors of bases are known
1408 1408 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1409 1409
1410 1410 # Make it easy to refer to self.manifest
1411 1411 mnfst = self.manifest
1412 1412 # We don't know which manifests are missing yet
1413 1413 msng_mnfst_set = {}
1414 1414 # Nor do we know which filenodes are missing.
1415 1415 msng_filenode_set = {}
1416 1416
1417 1417 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1418 1418 junk = None
1419 1419
1420 1420 # A changeset always belongs to itself, so the changenode lookup
1421 1421 # function for a changenode is identity.
1422 1422 def identity(x):
1423 1423 return x
1424 1424
1425 1425 # A function generating function that sets up the initial environment
1426 1426 # the inner function.
1427 1427 def filenode_collector(changedfiles):
1428 1428 # This gathers information from each manifestnode included in the
1429 1429 # changegroup about which filenodes the manifest node references
1430 1430 # so we can include those in the changegroup too.
1431 1431 #
1432 1432 # It also remembers which changenode each filenode belongs to. It
1433 1433 # does this by assuming that a filenode belongs to the changenode
1434 1434 # that the first manifest referencing it belongs to.
1435 1435 def collect_msng_filenodes(mnfstnode):
1436 1436 r = mnfst.rev(mnfstnode)
1437 1437 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1438 1438 # If the previous rev is one of the parents,
1439 1439 # we only need to see a diff.
1440 1440 deltamf = mnfst.readdelta(mnfstnode)
1441 1441 # For each line in the delta
1442 1442 for f, fnode in deltamf.iteritems():
1443 1443 # And if the file is in the list of files we care
1444 1444 # about.
1445 1445 if f in changedfiles:
1446 1446 # Get the changenode this manifest belongs to
1447 1447 clnode = msng_mnfst_set[mnfstnode]
1448 1448 # Create the set of filenodes for the file if
1449 1449 # there isn't one already.
1450 1450 ndset = msng_filenode_set.setdefault(f, {})
1451 1451 # And set the filenode's changelog node to the
1452 1452 # manifest's if it hasn't been set already.
1453 1453 ndset.setdefault(fnode, clnode)
1454 1454 else:
1455 1455 # Otherwise we need a full manifest.
1456 1456 m = mnfst.read(mnfstnode)
1457 1457 # For every file we care about.
1458 1458 for f in changedfiles:
1459 1459 fnode = m.get(f, None)
1460 1460 # If it's in the manifest
1461 1461 if fnode is not None:
1462 1462 # See comments above.
1463 1463 clnode = msng_mnfst_set[mnfstnode]
1464 1464 ndset = msng_filenode_set.setdefault(f, {})
1465 1465 ndset.setdefault(fnode, clnode)
1466 1466 return collect_msng_filenodes
1467 1467
1468 1468 # If we determine that a particular file or manifest node must be a
1469 1469 # node that the recipient of the changegroup will already have, we can
1470 1470 # also assume the recipient will have all the parents. This function
1471 1471 # prunes them from the set of missing nodes.
1472 1472 def prune(revlog, missingnodes):
1473 1473 hasset = set()
1474 1474 # If a 'missing' filenode thinks it belongs to a changenode we
1475 1475 # assume the recipient must have, then the recipient must have
1476 1476 # that filenode.
1477 1477 for n in missingnodes:
1478 1478 clrev = revlog.linkrev(revlog.rev(n))
1479 1479 if clrev in commonrevs:
1480 1480 hasset.add(n)
1481 1481 for n in hasset:
1482 1482 missingnodes.pop(n, None)
1483 1483 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1484 1484 missingnodes.pop(revlog.node(r), None)
1485 1485
1486 1486 # Add the nodes that were explicitly requested.
1487 1487 def add_extra_nodes(name, nodes):
1488 1488 if not extranodes or name not in extranodes:
1489 1489 return
1490 1490
1491 1491 for node, linknode in extranodes[name]:
1492 1492 if node not in nodes:
1493 1493 nodes[node] = linknode
1494 1494
1495 1495 # Now that we have all these utility functions to help out and
1496 1496 # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

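        # Shape of the stream gengroup() below produces (read directly from
        # its code; the chunk framing itself lives in changegroup.py):
        # changelog chunks, then manifest chunks, then, for each changed
        # file, chunkheader(len(fname)) and fname followed by that file's
        # chunks, with a final closechunk() terminating the stream.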
        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote, requirements):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
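        # Sketch of the stream layout, reconstructed from the parsing code
        # below (the server's stream_out is the authoritative producer):
        #   <resp>\n                       -- status code, handled above
        #   <total_files> <total_bytes>\n
        #   then, for each file:
        #   <name>\0<size>\n followed by exactly <size> bytes of file data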
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
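            # Worked example (assumed capability strings, not from the
            # original source): a server advertising
            # streamreqs='revlogv1,parentdelta' is fully covered by our
            # supportedformats, the set difference is empty, and we stream
            # in; an unknown entry such as 'futureformat' leaves the
            # difference non-empty and we fall through to pull() below.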
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
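
# Minimal usage sketch (illustrative; this module's transaction setup is
# expected to pass aftertrans(renames) as a post-transaction callback):
#
#   renames = [('journal', 'undo'), ('journal.branch', 'undo.branch')]
#   onclose = aftertrans(renames)
#   onclose()  # performs util.rename('journal', 'undo'), and so on
#
# The returned closure captures only the plain renamefiles list, not the
# repository object, which is what keeps destructors cycle-free.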

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True