##// END OF EJS Templates
localrepo: have _loadfilter return the loaded filter patterns
Nicolas Dumazet
r12706:9ca08fbb default
parent child Browse files
Show More
@@ -1,1893 +1,1892 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supportedformats = set(('revlogv1', 'parentdelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=0):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = util.path_auditor(self.root, self._checknested)
34 34 self.opener = util.opener(self.path)
35 35 self.wopener = util.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 os.mkdir(self.path)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener("00changelog.i", "a").write(
60 60 '\0\0\0\2' # represents revlogv2
61 61 ' dummy changelog to prevent using the old repo layout'
62 62 )
63 63 if self.ui.configbool('format', 'parentdelta', False):
64 64 requirements.append("parentdelta")
65 65 else:
66 66 raise error.RepoError(_("repository %s not found") % path)
67 67 elif create:
68 68 raise error.RepoError(_("repository %s already exists") % path)
69 69 else:
70 70 # find requirements
71 71 requirements = set()
72 72 try:
73 73 requirements = set(self.opener("requires").read().splitlines())
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 for r in requirements - self.supported:
78 78 raise error.RepoError(_("requirement '%s' not supported") % r)
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener("sharedpath").read())
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None # in UTF-8
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118 if 'parentdelta' in requirements:
119 119 self.sopener.options['parentdelta'] = 1
120 120
121 121 def _writerequirements(self):
122 122 reqfile = self.opener("requires", "w")
123 123 for r in self.requirements:
124 124 reqfile.write("%s\n" % r)
125 125 reqfile.close()
126 126
127 127 def _checknested(self, path):
128 128 """Determine if path is a legal nested repository."""
129 129 if not path.startswith(self.root):
130 130 return False
131 131 subpath = path[len(self.root) + 1:]
132 132
133 133 # XXX: Checking against the current working copy is wrong in
134 134 # the sense that it can reject things like
135 135 #
136 136 # $ hg cat -r 10 sub/x.txt
137 137 #
138 138 # if sub/ is no longer a subrepository in the working copy
139 139 # parent revision.
140 140 #
141 141 # However, it can of course also allow things that would have
142 142 # been rejected before, such as the above cat command if sub/
143 143 # is a subrepository now, but was a normal directory before.
144 144 # The old path auditor would have rejected by mistake since it
145 145 # panics when it sees sub/.hg/.
146 146 #
147 147 # All in all, checking against the working copy seems sensible
148 148 # since we want to prevent access to nested repositories on
149 149 # the filesystem *now*.
150 150 ctx = self[None]
151 151 parts = util.splitpath(subpath)
152 152 while parts:
153 153 prefix = os.sep.join(parts)
154 154 if prefix in ctx.substate:
155 155 if prefix == subpath:
156 156 return True
157 157 else:
158 158 sub = ctx.sub(prefix)
159 159 return sub.checknested(subpath[len(prefix) + 1:])
160 160 else:
161 161 parts.pop()
162 162 return False
163 163
164 164
165 165 @propertycache
166 166 def changelog(self):
167 167 c = changelog.changelog(self.sopener)
168 168 if 'HG_PENDING' in os.environ:
169 169 p = os.environ['HG_PENDING']
170 170 if p.startswith(self.root):
171 171 c.readpending('00changelog.i.a')
172 172 self.sopener.options['defversion'] = c.version
173 173 return c
174 174
175 175 @propertycache
176 176 def manifest(self):
177 177 return manifest.manifest(self.sopener)
178 178
179 179 @propertycache
180 180 def dirstate(self):
181 181 return dirstate.dirstate(self.opener, self.ui, self.root)
182 182
183 183 def __getitem__(self, changeid):
184 184 if changeid is None:
185 185 return context.workingctx(self)
186 186 return context.changectx(self, changeid)
187 187
188 188 def __contains__(self, changeid):
189 189 try:
190 190 return bool(self.lookup(changeid))
191 191 except error.RepoLookupError:
192 192 return False
193 193
194 194 def __nonzero__(self):
195 195 return True
196 196
197 197 def __len__(self):
198 198 return len(self.changelog)
199 199
200 200 def __iter__(self):
201 201 for i in xrange(len(self)):
202 202 yield i
203 203
204 204 def url(self):
205 205 return 'file:' + self.root
206 206
207 207 def hook(self, name, throw=False, **args):
208 208 return hook.hook(self.ui, self, name, throw, **args)
209 209
210 210 tag_disallowed = ':\r\n'
211 211
212 212 def _tag(self, names, node, message, local, user, date, extra={}):
213 213 if isinstance(names, str):
214 214 allchars = names
215 215 names = (names,)
216 216 else:
217 217 allchars = ''.join(names)
218 218 for c in self.tag_disallowed:
219 219 if c in allchars:
220 220 raise util.Abort(_('%r cannot be used in a tag name') % c)
221 221
222 222 branches = self.branchmap()
223 223 for name in names:
224 224 self.hook('pretag', throw=True, node=hex(node), tag=name,
225 225 local=local)
226 226 if name in branches:
227 227 self.ui.warn(_("warning: tag %s conflicts with existing"
228 228 " branch name\n") % name)
229 229
230 230 def writetags(fp, names, munge, prevtags):
231 231 fp.seek(0, 2)
232 232 if prevtags and prevtags[-1] != '\n':
233 233 fp.write('\n')
234 234 for name in names:
235 235 m = munge and munge(name) or name
236 236 if self._tagtypes and name in self._tagtypes:
237 237 old = self._tags.get(name, nullid)
238 238 fp.write('%s %s\n' % (hex(old), m))
239 239 fp.write('%s %s\n' % (hex(node), m))
240 240 fp.close()
241 241
242 242 prevtags = ''
243 243 if local:
244 244 try:
245 245 fp = self.opener('localtags', 'r+')
246 246 except IOError:
247 247 fp = self.opener('localtags', 'a')
248 248 else:
249 249 prevtags = fp.read()
250 250
251 251 # local tags are stored in the current charset
252 252 writetags(fp, names, None, prevtags)
253 253 for name in names:
254 254 self.hook('tag', node=hex(node), tag=name, local=local)
255 255 return
256 256
257 257 try:
258 258 fp = self.wfile('.hgtags', 'rb+')
259 259 except IOError:
260 260 fp = self.wfile('.hgtags', 'ab')
261 261 else:
262 262 prevtags = fp.read()
263 263
264 264 # committed tags are stored in UTF-8
265 265 writetags(fp, names, encoding.fromlocal, prevtags)
266 266
267 267 if '.hgtags' not in self.dirstate:
268 268 self[None].add(['.hgtags'])
269 269
270 270 m = matchmod.exact(self.root, '', ['.hgtags'])
271 271 tagnode = self.commit(message, user, date, extra=extra, match=m)
272 272
273 273 for name in names:
274 274 self.hook('tag', node=hex(node), tag=name, local=local)
275 275
276 276 return tagnode
277 277
278 278 def tag(self, names, node, message, local, user, date):
279 279 '''tag a revision with one or more symbolic names.
280 280
281 281 names is a list of strings or, when adding a single tag, names may be a
282 282 string.
283 283
284 284 if local is True, the tags are stored in a per-repository file.
285 285 otherwise, they are stored in the .hgtags file, and a new
286 286 changeset is committed with the change.
287 287
288 288 keyword arguments:
289 289
290 290 local: whether to store tags in non-version-controlled file
291 291 (default False)
292 292
293 293 message: commit message to use if committing
294 294
295 295 user: name of user to use if committing
296 296
297 297 date: date tuple to use if committing'''
298 298
299 299 for x in self.status()[:5]:
300 300 if '.hgtags' in x:
301 301 raise util.Abort(_('working copy of .hgtags is changed '
302 302 '(please commit .hgtags manually)'))
303 303
304 304 self.tags() # instantiate the cache
305 305 self._tag(names, node, message, local, user, date)
306 306
307 307 def tags(self):
308 308 '''return a mapping of tag to node'''
309 309 if self._tags is None:
310 310 (self._tags, self._tagtypes) = self._findtags()
311 311
312 312 return self._tags
313 313
314 314 def _findtags(self):
315 315 '''Do the hard work of finding tags. Return a pair of dicts
316 316 (tags, tagtypes) where tags maps tag name to node, and tagtypes
317 317 maps tag name to a string like \'global\' or \'local\'.
318 318 Subclasses or extensions are free to add their own tags, but
319 319 should be aware that the returned dicts will be retained for the
320 320 duration of the localrepo object.'''
321 321
322 322 # XXX what tagtype should subclasses/extensions use? Currently
323 323 # mq and bookmarks add tags, but do not set the tagtype at all.
324 324 # Should each extension invent its own tag type? Should there
325 325 # be one tagtype for all such "virtual" tags? Or is the status
326 326 # quo fine?
327 327
328 328 alltags = {} # map tag name to (node, hist)
329 329 tagtypes = {}
330 330
331 331 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
332 332 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
333 333
334 334 # Build the return dicts. Have to re-encode tag names because
335 335 # the tags module always uses UTF-8 (in order not to lose info
336 336 # writing to the cache), but the rest of Mercurial wants them in
337 337 # local encoding.
338 338 tags = {}
339 339 for (name, (node, hist)) in alltags.iteritems():
340 340 if node != nullid:
341 341 tags[encoding.tolocal(name)] = node
342 342 tags['tip'] = self.changelog.tip()
343 343 tagtypes = dict([(encoding.tolocal(name), value)
344 344 for (name, value) in tagtypes.iteritems()])
345 345 return (tags, tagtypes)
346 346
347 347 def tagtype(self, tagname):
348 348 '''
349 349 return the type of the given tag. result can be:
350 350
351 351 'local' : a local tag
352 352 'global' : a global tag
353 353 None : tag does not exist
354 354 '''
355 355
356 356 self.tags()
357 357
358 358 return self._tagtypes.get(tagname)
359 359
360 360 def tagslist(self):
361 361 '''return a list of tags ordered by revision'''
362 362 l = []
363 363 for t, n in self.tags().iteritems():
364 364 try:
365 365 r = self.changelog.rev(n)
366 366 except:
367 367 r = -2 # sort to the beginning of the list if unknown
368 368 l.append((r, t, n))
369 369 return [(t, n) for r, t, n in sorted(l)]
370 370
371 371 def nodetags(self, node):
372 372 '''return the tags associated with a node'''
373 373 if not self.nodetagscache:
374 374 self.nodetagscache = {}
375 375 for t, n in self.tags().iteritems():
376 376 self.nodetagscache.setdefault(n, []).append(t)
377 377 for tags in self.nodetagscache.itervalues():
378 378 tags.sort()
379 379 return self.nodetagscache.get(node, [])
380 380
381 381 def _branchtags(self, partial, lrev):
382 382 # TODO: rename this function?
383 383 tiprev = len(self) - 1
384 384 if lrev != tiprev:
385 385 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
386 386 self._updatebranchcache(partial, ctxgen)
387 387 self._writebranchcache(partial, self.changelog.tip(), tiprev)
388 388
389 389 return partial
390 390
391 391 def updatebranchcache(self):
392 392 tip = self.changelog.tip()
393 393 if self._branchcache is not None and self._branchcachetip == tip:
394 394 return self._branchcache
395 395
396 396 oldtip = self._branchcachetip
397 397 self._branchcachetip = tip
398 398 if oldtip is None or oldtip not in self.changelog.nodemap:
399 399 partial, last, lrev = self._readbranchcache()
400 400 else:
401 401 lrev = self.changelog.rev(oldtip)
402 402 partial = self._branchcache
403 403
404 404 self._branchtags(partial, lrev)
405 405 # this private cache holds all heads (not just tips)
406 406 self._branchcache = partial
407 407
408 408 def branchmap(self):
409 409 '''returns a dictionary {branch: [branchheads]}'''
410 410 self.updatebranchcache()
411 411 return self._branchcache
412 412
413 413 def branchtags(self):
414 414 '''return a dict where branch names map to the tipmost head of
415 415 the branch, open heads come before closed'''
416 416 bt = {}
417 417 for bn, heads in self.branchmap().iteritems():
418 418 tip = heads[-1]
419 419 for h in reversed(heads):
420 420 if 'close' not in self.changelog.read(h)[5]:
421 421 tip = h
422 422 break
423 423 bt[bn] = tip
424 424 return bt
425 425
426 426
427 427 def _readbranchcache(self):
428 428 partial = {}
429 429 try:
430 430 f = self.opener("branchheads.cache")
431 431 lines = f.read().split('\n')
432 432 f.close()
433 433 except (IOError, OSError):
434 434 return {}, nullid, nullrev
435 435
436 436 try:
437 437 last, lrev = lines.pop(0).split(" ", 1)
438 438 last, lrev = bin(last), int(lrev)
439 439 if lrev >= len(self) or self[lrev].node() != last:
440 440 # invalidate the cache
441 441 raise ValueError('invalidating branch cache (tip differs)')
442 442 for l in lines:
443 443 if not l:
444 444 continue
445 445 node, label = l.split(" ", 1)
446 446 partial.setdefault(label.strip(), []).append(bin(node))
447 447 except KeyboardInterrupt:
448 448 raise
449 449 except Exception, inst:
450 450 if self.ui.debugflag:
451 451 self.ui.warn(str(inst), '\n')
452 452 partial, last, lrev = {}, nullid, nullrev
453 453 return partial, last, lrev
454 454
455 455 def _writebranchcache(self, branches, tip, tiprev):
456 456 try:
457 457 f = self.opener("branchheads.cache", "w", atomictemp=True)
458 458 f.write("%s %s\n" % (hex(tip), tiprev))
459 459 for label, nodes in branches.iteritems():
460 460 for node in nodes:
461 461 f.write("%s %s\n" % (hex(node), label))
462 462 f.rename()
463 463 except (IOError, OSError):
464 464 pass
465 465
466 466 def _updatebranchcache(self, partial, ctxgen):
467 467 # collect new branch entries
468 468 newbranches = {}
469 469 for c in ctxgen:
470 470 newbranches.setdefault(c.branch(), []).append(c.node())
471 471 # if older branchheads are reachable from new ones, they aren't
472 472 # really branchheads. Note checking parents is insufficient:
473 473 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
474 474 for branch, newnodes in newbranches.iteritems():
475 475 bheads = partial.setdefault(branch, [])
476 476 bheads.extend(newnodes)
477 477 if len(bheads) <= 1:
478 478 continue
479 479 # starting from tip means fewer passes over reachable
480 480 while newnodes:
481 481 latest = newnodes.pop()
482 482 if latest not in bheads:
483 483 continue
484 484 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
485 485 reachable = self.changelog.reachable(latest, minbhrev)
486 486 reachable.remove(latest)
487 487 bheads = [b for b in bheads if b not in reachable]
488 488 partial[branch] = bheads
489 489
490 490 def lookup(self, key):
491 491 if isinstance(key, int):
492 492 return self.changelog.node(key)
493 493 elif key == '.':
494 494 return self.dirstate.parents()[0]
495 495 elif key == 'null':
496 496 return nullid
497 497 elif key == 'tip':
498 498 return self.changelog.tip()
499 499 n = self.changelog._match(key)
500 500 if n:
501 501 return n
502 502 if key in self.tags():
503 503 return self.tags()[key]
504 504 if key in self.branchtags():
505 505 return self.branchtags()[key]
506 506 n = self.changelog._partialmatch(key)
507 507 if n:
508 508 return n
509 509
510 510 # can't find key, check if it might have come from damaged dirstate
511 511 if key in self.dirstate.parents():
512 512 raise error.Abort(_("working directory has unknown parent '%s'!")
513 513 % short(key))
514 514 try:
515 515 if len(key) == 20:
516 516 key = hex(key)
517 517 except:
518 518 pass
519 519 raise error.RepoLookupError(_("unknown revision '%s'") % key)
520 520
521 521 def lookupbranch(self, key, remote=None):
522 522 repo = remote or self
523 523 if key in repo.branchmap():
524 524 return key
525 525
526 526 repo = (remote and remote.local()) and remote or self
527 527 return repo[key].branch()
528 528
529 529 def local(self):
530 530 return True
531 531
532 532 def join(self, f):
533 533 return os.path.join(self.path, f)
534 534
535 535 def wjoin(self, f):
536 536 return os.path.join(self.root, f)
537 537
538 538 def file(self, f):
539 539 if f[0] == '/':
540 540 f = f[1:]
541 541 return filelog.filelog(self.sopener, f)
542 542
543 543 def changectx(self, changeid):
544 544 return self[changeid]
545 545
546 546 def parents(self, changeid=None):
547 547 '''get list of changectxs for parents of changeid'''
548 548 return self[changeid].parents()
549 549
550 550 def filectx(self, path, changeid=None, fileid=None):
551 551 """changeid can be a changeset revision, node, or tag.
552 552 fileid can be a file revision or node."""
553 553 return context.filectx(self, path, changeid, fileid)
554 554
555 555 def getcwd(self):
556 556 return self.dirstate.getcwd()
557 557
558 558 def pathto(self, f, cwd=None):
559 559 return self.dirstate.pathto(f, cwd)
560 560
561 561 def wfile(self, f, mode='r'):
562 562 return self.wopener(f, mode)
563 563
564 564 def _link(self, f):
565 565 return os.path.islink(self.wjoin(f))
566 566
567 567 def _loadfilter(self, filter):
568 568 if filter not in self.filterpats:
569 569 l = []
570 570 for pat, cmd in self.ui.configitems(filter):
571 571 if cmd == '!':
572 572 continue
573 573 mf = matchmod.match(self.root, '', [pat])
574 574 fn = None
575 575 params = cmd
576 576 for name, filterfn in self._datafilters.iteritems():
577 577 if cmd.startswith(name):
578 578 fn = filterfn
579 579 params = cmd[len(name):].lstrip()
580 580 break
581 581 if not fn:
582 582 fn = lambda s, c, **kwargs: util.filter(s, c)
583 583 # Wrap old filters not supporting keyword arguments
584 584 if not inspect.getargspec(fn)[2]:
585 585 oldfn = fn
586 586 fn = lambda s, c, **kwargs: oldfn(s, c)
587 587 l.append((mf, fn, params))
588 588 self.filterpats[filter] = l
589 return self.filterpats[filter]
589 590
590 591 def _filter(self, filter, filename, data):
591 self._loadfilter(filter)
592
593 for mf, fn, cmd in self.filterpats[filter]:
592 for mf, fn, cmd in self._loadfilter[filter]:
594 593 if mf(filename):
595 594 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
596 595 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
597 596 break
598 597
599 598 return data
600 599
601 600 def adddatafilter(self, name, filter):
602 601 self._datafilters[name] = filter
603 602
604 603 def wread(self, filename):
605 604 if self._link(filename):
606 605 data = os.readlink(self.wjoin(filename))
607 606 else:
608 607 data = self.wopener(filename, 'r').read()
609 608 return self._filter("encode", filename, data)
610 609
611 610 def wwrite(self, filename, data, flags):
612 611 data = self._filter("decode", filename, data)
613 612 try:
614 613 os.unlink(self.wjoin(filename))
615 614 except OSError:
616 615 pass
617 616 if 'l' in flags:
618 617 self.wopener.symlink(data, filename)
619 618 else:
620 619 self.wopener(filename, 'w').write(data)
621 620 if 'x' in flags:
622 621 util.set_flags(self.wjoin(filename), False, True)
623 622
624 623 def wwritedata(self, filename, data):
625 624 return self._filter("decode", filename, data)
626 625
627 626 def transaction(self, desc):
628 627 tr = self._transref and self._transref() or None
629 628 if tr and tr.running():
630 629 return tr.nest()
631 630
632 631 # abort here if the journal already exists
633 632 if os.path.exists(self.sjoin("journal")):
634 633 raise error.RepoError(
635 634 _("abandoned transaction found - run hg recover"))
636 635
637 636 # save dirstate for rollback
638 637 try:
639 638 ds = self.opener("dirstate").read()
640 639 except IOError:
641 640 ds = ""
642 641 self.opener("journal.dirstate", "w").write(ds)
643 642 self.opener("journal.branch", "w").write(self.dirstate.branch())
644 643 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
645 644
646 645 renames = [(self.sjoin("journal"), self.sjoin("undo")),
647 646 (self.join("journal.dirstate"), self.join("undo.dirstate")),
648 647 (self.join("journal.branch"), self.join("undo.branch")),
649 648 (self.join("journal.desc"), self.join("undo.desc"))]
650 649 tr = transaction.transaction(self.ui.warn, self.sopener,
651 650 self.sjoin("journal"),
652 651 aftertrans(renames),
653 652 self.store.createmode)
654 653 self._transref = weakref.ref(tr)
655 654 return tr
656 655
657 656 def recover(self):
658 657 lock = self.lock()
659 658 try:
660 659 if os.path.exists(self.sjoin("journal")):
661 660 self.ui.status(_("rolling back interrupted transaction\n"))
662 661 transaction.rollback(self.sopener, self.sjoin("journal"),
663 662 self.ui.warn)
664 663 self.invalidate()
665 664 return True
666 665 else:
667 666 self.ui.warn(_("no interrupted transaction available\n"))
668 667 return False
669 668 finally:
670 669 lock.release()
671 670
672 671 def rollback(self, dryrun=False):
673 672 wlock = lock = None
674 673 try:
675 674 wlock = self.wlock()
676 675 lock = self.lock()
677 676 if os.path.exists(self.sjoin("undo")):
678 677 try:
679 678 args = self.opener("undo.desc", "r").read().splitlines()
680 679 if len(args) >= 3 and self.ui.verbose:
681 680 desc = _("rolling back to revision %s"
682 681 " (undo %s: %s)\n") % (
683 682 int(args[0]) - 1, args[1], args[2])
684 683 elif len(args) >= 2:
685 684 desc = _("rolling back to revision %s (undo %s)\n") % (
686 685 int(args[0]) - 1, args[1])
687 686 except IOError:
688 687 desc = _("rolling back unknown transaction\n")
689 688 self.ui.status(desc)
690 689 if dryrun:
691 690 return
692 691 transaction.rollback(self.sopener, self.sjoin("undo"),
693 692 self.ui.warn)
694 693 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
695 694 try:
696 695 branch = self.opener("undo.branch").read()
697 696 self.dirstate.setbranch(branch)
698 697 except IOError:
699 698 self.ui.warn(_("Named branch could not be reset, "
700 699 "current branch still is: %s\n")
701 700 % encoding.tolocal(self.dirstate.branch()))
702 701 self.invalidate()
703 702 self.dirstate.invalidate()
704 703 self.destroyed()
705 704 else:
706 705 self.ui.warn(_("no rollback information available\n"))
707 706 return 1
708 707 finally:
709 708 release(lock, wlock)
710 709
711 710 def invalidatecaches(self):
712 711 self._tags = None
713 712 self._tagtypes = None
714 713 self.nodetagscache = None
715 714 self._branchcache = None # in UTF-8
716 715 self._branchcachetip = None
717 716
718 717 def invalidate(self):
719 718 for a in "changelog manifest".split():
720 719 if a in self.__dict__:
721 720 delattr(self, a)
722 721 self.invalidatecaches()
723 722
724 723 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
725 724 try:
726 725 l = lock.lock(lockname, 0, releasefn, desc=desc)
727 726 except error.LockHeld, inst:
728 727 if not wait:
729 728 raise
730 729 self.ui.warn(_("waiting for lock on %s held by %r\n") %
731 730 (desc, inst.locker))
732 731 # default to 600 seconds timeout
733 732 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
734 733 releasefn, desc=desc)
735 734 if acquirefn:
736 735 acquirefn()
737 736 return l
738 737
739 738 def lock(self, wait=True):
740 739 '''Lock the repository store (.hg/store) and return a weak reference
741 740 to the lock. Use this before modifying the store (e.g. committing or
742 741 stripping). If you are opening a transaction, get a lock as well.)'''
743 742 l = self._lockref and self._lockref()
744 743 if l is not None and l.held:
745 744 l.lock()
746 745 return l
747 746
748 747 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
749 748 _('repository %s') % self.origroot)
750 749 self._lockref = weakref.ref(l)
751 750 return l
752 751
753 752 def wlock(self, wait=True):
754 753 '''Lock the non-store parts of the repository (everything under
755 754 .hg except .hg/store) and return a weak reference to the lock.
756 755 Use this before modifying files in .hg.'''
757 756 l = self._wlockref and self._wlockref()
758 757 if l is not None and l.held:
759 758 l.lock()
760 759 return l
761 760
762 761 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
763 762 self.dirstate.invalidate, _('working directory of %s') %
764 763 self.origroot)
765 764 self._wlockref = weakref.ref(l)
766 765 return l
767 766
768 767 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
769 768 """
770 769 commit an individual file as part of a larger transaction
771 770 """
772 771
773 772 fname = fctx.path()
774 773 text = fctx.data()
775 774 flog = self.file(fname)
776 775 fparent1 = manifest1.get(fname, nullid)
777 776 fparent2 = fparent2o = manifest2.get(fname, nullid)
778 777
779 778 meta = {}
780 779 copy = fctx.renamed()
781 780 if copy and copy[0] != fname:
782 781 # Mark the new revision of this file as a copy of another
783 782 # file. This copy data will effectively act as a parent
784 783 # of this new revision. If this is a merge, the first
785 784 # parent will be the nullid (meaning "look up the copy data")
786 785 # and the second one will be the other parent. For example:
787 786 #
788 787 # 0 --- 1 --- 3 rev1 changes file foo
789 788 # \ / rev2 renames foo to bar and changes it
790 789 # \- 2 -/ rev3 should have bar with all changes and
791 790 # should record that bar descends from
792 791 # bar in rev2 and foo in rev1
793 792 #
794 793 # this allows this merge to succeed:
795 794 #
796 795 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
797 796 # \ / merging rev3 and rev4 should use bar@rev2
798 797 # \- 2 --- 4 as the merge base
799 798 #
800 799
801 800 cfname = copy[0]
802 801 crev = manifest1.get(cfname)
803 802 newfparent = fparent2
804 803
805 804 if manifest2: # branch merge
806 805 if fparent2 == nullid or crev is None: # copied on remote side
807 806 if cfname in manifest2:
808 807 crev = manifest2[cfname]
809 808 newfparent = fparent1
810 809
811 810 # find source in nearest ancestor if we've lost track
812 811 if not crev:
813 812 self.ui.debug(" %s: searching for copy revision for %s\n" %
814 813 (fname, cfname))
815 814 for ancestor in self['.'].ancestors():
816 815 if cfname in ancestor:
817 816 crev = ancestor[cfname].filenode()
818 817 break
819 818
820 819 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
821 820 meta["copy"] = cfname
822 821 meta["copyrev"] = hex(crev)
823 822 fparent1, fparent2 = nullid, newfparent
824 823 elif fparent2 != nullid:
825 824 # is one parent an ancestor of the other?
826 825 fparentancestor = flog.ancestor(fparent1, fparent2)
827 826 if fparentancestor == fparent1:
828 827 fparent1, fparent2 = fparent2, nullid
829 828 elif fparentancestor == fparent2:
830 829 fparent2 = nullid
831 830
832 831 # is the file changed?
833 832 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
834 833 changelist.append(fname)
835 834 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
836 835
837 836 # are just the flags changed during merge?
838 837 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
839 838 changelist.append(fname)
840 839
841 840 return fparent1
842 841
843 842 def commit(self, text="", user=None, date=None, match=None, force=False,
844 843 editor=False, extra={}):
845 844 """Add a new revision to current repository.
846 845
847 846 Revision information is gathered from the working directory,
848 847 match can be used to filter the committed files. If editor is
849 848 supplied, it is called to get a commit message.
850 849 """
851 850
852 851 def fail(f, msg):
853 852 raise util.Abort('%s: %s' % (f, msg))
854 853
855 854 if not match:
856 855 match = matchmod.always(self.root, '')
857 856
858 857 if not force:
859 858 vdirs = []
860 859 match.dir = vdirs.append
861 860 match.bad = fail
862 861
863 862 wlock = self.wlock()
864 863 try:
865 864 wctx = self[None]
866 865 merge = len(wctx.parents()) > 1
867 866
868 867 if (not force and merge and match and
869 868 (match.files() or match.anypats())):
870 869 raise util.Abort(_('cannot partially commit a merge '
871 870 '(do not specify files or patterns)'))
872 871
873 872 changes = self.status(match=match, clean=force)
874 873 if force:
875 874 changes[0].extend(changes[6]) # mq may commit unchanged files
876 875
877 876 # check subrepos
878 877 subs = []
879 878 removedsubs = set()
880 879 for p in wctx.parents():
881 880 removedsubs.update(s for s in p.substate if match(s))
882 881 for s in wctx.substate:
883 882 removedsubs.discard(s)
884 883 if match(s) and wctx.sub(s).dirty():
885 884 subs.append(s)
886 885 if (subs or removedsubs):
887 886 if (not match('.hgsub') and
888 887 '.hgsub' in (wctx.modified() + wctx.added())):
889 888 raise util.Abort(_("can't commit subrepos without .hgsub"))
890 889 if '.hgsubstate' not in changes[0]:
891 890 changes[0].insert(0, '.hgsubstate')
892 891
893 892 # make sure all explicit patterns are matched
894 893 if not force and match.files():
895 894 matched = set(changes[0] + changes[1] + changes[2])
896 895
897 896 for f in match.files():
898 897 if f == '.' or f in matched or f in wctx.substate:
899 898 continue
900 899 if f in changes[3]: # missing
901 900 fail(f, _('file not found!'))
902 901 if f in vdirs: # visited directory
903 902 d = f + '/'
904 903 for mf in matched:
905 904 if mf.startswith(d):
906 905 break
907 906 else:
908 907 fail(f, _("no match under directory!"))
909 908 elif f not in self.dirstate:
910 909 fail(f, _("file not tracked!"))
911 910
912 911 if (not force and not extra.get("close") and not merge
913 912 and not (changes[0] or changes[1] or changes[2])
914 913 and wctx.branch() == wctx.p1().branch()):
915 914 return None
916 915
917 916 ms = mergemod.mergestate(self)
918 917 for f in changes[0]:
919 918 if f in ms and ms[f] == 'u':
920 919 raise util.Abort(_("unresolved merge conflicts "
921 920 "(see hg resolve)"))
922 921
923 922 cctx = context.workingctx(self, text, user, date, extra, changes)
924 923 if editor:
925 924 cctx._text = editor(self, cctx, subs)
926 925 edited = (text != cctx._text)
927 926
928 927 # commit subs
929 928 if subs or removedsubs:
930 929 state = wctx.substate.copy()
931 930 for s in sorted(subs):
932 931 sub = wctx.sub(s)
933 932 self.ui.status(_('committing subrepository %s\n') %
934 933 subrepo.relpath(sub))
935 934 sr = sub.commit(cctx._text, user, date)
936 935 state[s] = (state[s][0], sr)
937 936 subrepo.writestate(self, state)
938 937
939 938 # Save commit message in case this transaction gets rolled back
940 939 # (e.g. by a pretxncommit hook). Leave the content alone on
941 940 # the assumption that the user will use the same editor again.
942 941 msgfile = self.opener('last-message.txt', 'wb')
943 942 msgfile.write(cctx._text)
944 943 msgfile.close()
945 944
946 945 p1, p2 = self.dirstate.parents()
947 946 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
948 947 try:
949 948 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
950 949 ret = self.commitctx(cctx, True)
951 950 except:
952 951 if edited:
953 952 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
954 953 self.ui.write(
955 954 _('note: commit message saved in %s\n') % msgfn)
956 955 raise
957 956
958 957 # update dirstate and mergestate
959 958 for f in changes[0] + changes[1]:
960 959 self.dirstate.normal(f)
961 960 for f in changes[2]:
962 961 self.dirstate.forget(f)
963 962 self.dirstate.setparents(ret)
964 963 ms.reset()
965 964 finally:
966 965 wlock.release()
967 966
968 967 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
969 968 return ret
970 969
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        If error is true, any IOError while committing a file is re-raised;
        otherwise a missing file (ENOENT) is silently recorded as removed.
        Returns the node id of the new changeset.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        # start from the first parent's manifest; it is updated in place below
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            # new: filename -> new filelog node
            # changed: files actually changed (filled by _filecommit)
            # linkrev: the revision number the new changeset will receive
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file vanished from disk: treat it as removed
                        removed.append(f)

            # update manifest
            m1.update(new)
            # only report as removed what actually existed in a parent
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # 'pending' callback lets the pretxncommit hook see the not yet
            # finalized changelog data on disk
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1038 1037
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        # NOTE(review): presumably invalidatecaches() drops the in-memory
        # tag/branch caches so they are rebuilt lazily - confirm there.
        self.invalidatecaches()
1057 1056
1058 1057 def walk(self, match, node=None):
1059 1058 '''
1060 1059 walk recursively through the directory tree or a given
1061 1060 changeset, finding all files matched by the match
1062 1061 function
1063 1062 '''
1064 1063 return self[node].walk(match)
1065 1064
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of seven sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean)
        The ignored/clean/unknown lists are only populated when the
        corresponding keyword argument is true.
        """

        # restrict a copy of ctx's manifest to the files accepted by match
        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            # only warn about bad (unmatchable) files that do not exist
            # in the base revision either
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # NOTE: 'cmp' (files needing a content compare) shadows the
            # cmp() builtin inside this method
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # walk mf2, classifying each file against mf1; whatever is
            # left in mf1 afterwards must have been removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # merge each subrepo's status lists into ours, prefixing the
            # file names with the subrepo path
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r
1194 1193
1195 1194 def heads(self, start=None):
1196 1195 heads = self.changelog.heads(start)
1197 1196 # sort the output in rev descending order
1198 1197 heads = [(-self.changelog.rev(h), h) for h in heads]
1199 1198 return [n for (r, n) in sorted(heads)]
1200 1199
1201 1200 def branchheads(self, branch=None, start=None, closed=False):
1202 1201 '''return a (possibly filtered) list of heads for the given branch
1203 1202
1204 1203 Heads are returned in topological order, from newest to oldest.
1205 1204 If branch is None, use the dirstate branch.
1206 1205 If start is not None, return only heads reachable from start.
1207 1206 If closed is True, return heads that are marked as closed as well.
1208 1207 '''
1209 1208 if branch is None:
1210 1209 branch = self[None].branch()
1211 1210 branches = self.branchmap()
1212 1211 if branch not in branches:
1213 1212 return []
1214 1213 # the cache returns heads ordered lowest to highest
1215 1214 bheads = list(reversed(branches[branch]))
1216 1215 if start is not None:
1217 1216 # filter out the heads that cannot be reached from startrev
1218 1217 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1219 1218 bheads = [h for h in bheads if h in fbheads]
1220 1219 if not closed:
1221 1220 bheads = [h for h in bheads if
1222 1221 ('close' not in self.changelog.read(h)[5])]
1223 1222 return bheads
1224 1223
1225 1224 def branches(self, nodes):
1226 1225 if not nodes:
1227 1226 nodes = [self.changelog.tip()]
1228 1227 b = []
1229 1228 for n in nodes:
1230 1229 t = n
1231 1230 while 1:
1232 1231 p = self.changelog.parents(n)
1233 1232 if p[1] != nullid or p[0] == nullid:
1234 1233 b.append((t, n, p[0], p[1]))
1235 1234 break
1236 1235 n = p[0]
1237 1236 return b
1238 1237
1239 1238 def between(self, pairs):
1240 1239 r = []
1241 1240
1242 1241 for top, bottom in pairs:
1243 1242 n, l, i = top, [], 0
1244 1243 f = 1
1245 1244
1246 1245 while n != bottom and n != nullid:
1247 1246 p = self.changelog.parents(n)[0]
1248 1247 if i == f:
1249 1248 l.append(n)
1250 1249 f = f * 2
1251 1250 n = p
1252 1251 i += 1
1253 1252
1254 1253 r.append(l)
1255 1254
1256 1255 return r
1257 1256
1258 1257 def pull(self, remote, heads=None, force=False):
1259 1258 lock = self.lock()
1260 1259 try:
1261 1260 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1262 1261 force=force)
1263 1262 common, fetch, rheads = tmp
1264 1263 if not fetch:
1265 1264 self.ui.status(_("no changes found\n"))
1266 1265 return 0
1267 1266
1268 1267 if fetch == [nullid]:
1269 1268 self.ui.status(_("requesting all changes\n"))
1270 1269 elif heads is None and remote.capable('changegroupsubset'):
1271 1270 # issue1320, avoid a race if remote changed after discovery
1272 1271 heads = rheads
1273 1272
1274 1273 if heads is None:
1275 1274 cg = remote.changegroup(fetch, 'pull')
1276 1275 else:
1277 1276 if not remote.capable('changegroupsubset'):
1278 1277 raise util.Abort(_("partial pull cannot be done because "
1279 1278 "other repository doesn't support "
1280 1279 "changegroupsubset."))
1281 1280 cg = remote.changegroupsubset(fetch, heads, 'pull')
1282 1281 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1283 1282 finally:
1284 1283 lock.release()
1285 1284
1286 1285 def push(self, remote, force=False, revs=None, newbranch=False):
1287 1286 '''Push outgoing changesets (limited by revs) from the current
1288 1287 repository to remote. Return an integer:
1289 1288 - 0 means HTTP error *or* nothing to push
1290 1289 - 1 means we pushed and remote head count is unchanged *or*
1291 1290 we have outgoing changesets but refused to push
1292 1291 - other values as described by addchangegroup()
1293 1292 '''
1294 1293 # there are two ways to push to remote repo:
1295 1294 #
1296 1295 # addchangegroup assumes local user can lock remote
1297 1296 # repo (local filesystem, old ssh servers).
1298 1297 #
1299 1298 # unbundle assumes local user cannot lock remote repo (new ssh
1300 1299 # servers, http servers).
1301 1300
1302 1301 lock = None
1303 1302 unbundle = remote.capable('unbundle')
1304 1303 if not unbundle:
1305 1304 lock = remote.lock()
1306 1305 try:
1307 1306 ret = discovery.prepush(self, remote, force, revs, newbranch)
1308 1307 if ret[0] is None:
1309 1308 # and here we return 0 for "nothing to push" or 1 for
1310 1309 # "something to push but I refuse"
1311 1310 return ret[1]
1312 1311
1313 1312 cg, remote_heads = ret
1314 1313 if unbundle:
1315 1314 # local repo finds heads on server, finds out what revs it must
1316 1315 # push. once revs transferred, if server finds it has
1317 1316 # different heads (someone else won commit/push race), server
1318 1317 # aborts.
1319 1318 if force:
1320 1319 remote_heads = ['force']
1321 1320 # ssh: return remote's addchangegroup()
1322 1321 # http: return remote's addchangegroup() or 0 for error
1323 1322 return remote.unbundle(cg, remote_heads, 'push')
1324 1323 else:
1325 1324 # we return an integer indicating remote head count change
1326 1325 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1327 1326 finally:
1328 1327 if lock is not None:
1329 1328 lock.release()
1330 1329
1331 1330 def changegroupinfo(self, nodes, source):
1332 1331 if self.ui.verbose or source == 'bundle':
1333 1332 self.ui.status(_("%d changesets found\n") % len(nodes))
1334 1333 if self.ui.debugflag:
1335 1334 self.ui.debug("list of changesets:\n")
1336 1335 for node in nodes:
1337 1336 self.ui.debug("%s\n" % hex(node))
1338 1337
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendents of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                # sending everything up to our heads: no subsetting needed
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            # everything a known node descends from is also known
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1550 1549
1551 1550 def changegroup(self, basenodes, source):
1552 1551 # to avoid a race we use changegroupsubset() (issue1320)
1553 1552 return self.changegroupsubset(basenodes, self.heads(), source)
1554 1553
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # changelog revisions being sent; used to filter other revlogs
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # a changeset's group lookup node is itself
        def identity(x):
            return x

        # yield the nodes of 'log' whose linked changeset is being sent
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # build a callback mapping a revlog node to its changelog node
        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            # changesets first, then manifests, then one group per file
            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # each file group is preceded by its name chunk
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            # empty chunk signals the end of the stream
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1628 1627
1629 1628 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1630 1629 """Add the changegroup returned by source.read() to this repo.
1631 1630 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1632 1631 the URL of the repo where this changegroup is coming from.
1633 1632
1634 1633 Return an integer summarizing the change to this repo:
1635 1634 - nothing changed or no source: 0
1636 1635 - more heads than before: 1+added heads (2..n)
1637 1636 - fewer heads than before: -1-removed heads (-2..-n)
1638 1637 - number of heads stays the same: 1
1639 1638 """
1640 1639 def csmap(x):
1641 1640 self.ui.debug("add changeset %s\n" % short(x))
1642 1641 return len(cl)
1643 1642
1644 1643 def revmap(x):
1645 1644 return cl.rev(x)
1646 1645
1647 1646 if not source:
1648 1647 return 0
1649 1648
1650 1649 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1651 1650
1652 1651 changesets = files = revisions = 0
1653 1652 efiles = set()
1654 1653
1655 1654 # write changelog data to temp files so concurrent readers will not see
1656 1655 # inconsistent view
1657 1656 cl = self.changelog
1658 1657 cl.delayupdate()
1659 1658 oldheads = len(cl.heads())
1660 1659
1661 1660 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1662 1661 try:
1663 1662 trp = weakref.proxy(tr)
1664 1663 # pull off the changeset group
1665 1664 self.ui.status(_("adding changesets\n"))
1666 1665 clstart = len(cl)
1667 1666 class prog(object):
1668 1667 step = _('changesets')
1669 1668 count = 1
1670 1669 ui = self.ui
1671 1670 total = None
1672 1671 def __call__(self):
1673 1672 self.ui.progress(self.step, self.count, unit=_('chunks'),
1674 1673 total=self.total)
1675 1674 self.count += 1
1676 1675 pr = prog()
1677 1676 source.callback = pr
1678 1677
1679 1678 if (cl.addgroup(source, csmap, trp) is None
1680 1679 and not emptyok):
1681 1680 raise util.Abort(_("received changelog group is empty"))
1682 1681 clend = len(cl)
1683 1682 changesets = clend - clstart
1684 1683 for c in xrange(clstart, clend):
1685 1684 efiles.update(self[c].files())
1686 1685 efiles = len(efiles)
1687 1686 self.ui.progress(_('changesets'), None)
1688 1687
1689 1688 # pull off the manifest group
1690 1689 self.ui.status(_("adding manifests\n"))
1691 1690 pr.step = _('manifests')
1692 1691 pr.count = 1
1693 1692 pr.total = changesets # manifests <= changesets
1694 1693 # no need to check for empty manifest group here:
1695 1694 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1696 1695 # no new manifest will be created and the manifest group will
1697 1696 # be empty during the pull
1698 1697 self.manifest.addgroup(source, revmap, trp)
1699 1698 self.ui.progress(_('manifests'), None)
1700 1699
1701 1700 needfiles = {}
1702 1701 if self.ui.configbool('server', 'validate', default=False):
1703 1702 # validate incoming csets have their manifests
1704 1703 for cset in xrange(clstart, clend):
1705 1704 mfest = self.changelog.read(self.changelog.node(cset))[0]
1706 1705 mfest = self.manifest.readdelta(mfest)
1707 1706 # store file nodes we must see
1708 1707 for f, n in mfest.iteritems():
1709 1708 needfiles.setdefault(f, set()).add(n)
1710 1709
1711 1710 # process the files
1712 1711 self.ui.status(_("adding file changes\n"))
1713 1712 pr.step = 'files'
1714 1713 pr.count = 1
1715 1714 pr.total = efiles
1716 1715 source.callback = None
1717 1716
1718 1717 while 1:
1719 1718 f = source.chunk()
1720 1719 if not f:
1721 1720 break
1722 1721 self.ui.debug("adding %s revisions\n" % f)
1723 1722 pr()
1724 1723 fl = self.file(f)
1725 1724 o = len(fl)
1726 1725 if fl.addgroup(source, revmap, trp) is None:
1727 1726 raise util.Abort(_("received file revlog group is empty"))
1728 1727 revisions += len(fl) - o
1729 1728 files += 1
1730 1729 if f in needfiles:
1731 1730 needs = needfiles[f]
1732 1731 for new in xrange(o, len(fl)):
1733 1732 n = fl.node(new)
1734 1733 if n in needs:
1735 1734 needs.remove(n)
1736 1735 if not needs:
1737 1736 del needfiles[f]
1738 1737 self.ui.progress(_('files'), None)
1739 1738
1740 1739 for f, needs in needfiles.iteritems():
1741 1740 fl = self.file(f)
1742 1741 for n in needs:
1743 1742 try:
1744 1743 fl.rev(n)
1745 1744 except error.LookupError:
1746 1745 raise util.Abort(
1747 1746 _('missing file data for %s:%s - run hg verify') %
1748 1747 (f, hex(n)))
1749 1748
1750 1749 newheads = len(cl.heads())
1751 1750 heads = ""
1752 1751 if oldheads and newheads != oldheads:
1753 1752 heads = _(" (%+d heads)") % (newheads - oldheads)
1754 1753
1755 1754 self.ui.status(_("added %d changesets"
1756 1755 " with %d changes to %d files%s\n")
1757 1756 % (changesets, revisions, files, heads))
1758 1757
1759 1758 if changesets > 0:
1760 1759 p = lambda: cl.writepending() and self.root or ""
1761 1760 self.hook('pretxnchangegroup', throw=True,
1762 1761 node=hex(cl.node(clstart)), source=srctype,
1763 1762 url=url, pending=p)
1764 1763
1765 1764 # make changelog see real files again
1766 1765 cl.finalize(trp)
1767 1766
1768 1767 tr.close()
1769 1768 finally:
1770 1769 tr.release()
1771 1770 if lock:
1772 1771 lock.release()
1773 1772
1774 1773 if changesets > 0:
1775 1774 # forcefully update the on-disk branch cache
1776 1775 self.ui.debug("updating the branch cache\n")
1777 1776 self.updatebranchcache()
1778 1777 self.hook("changegroup", node=hex(cl.node(clstart)),
1779 1778 source=srctype, url=url)
1780 1779
1781 1780 for i in xrange(clstart, clend):
1782 1781 self.hook("incoming", node=hex(cl.node(i)),
1783 1782 source=srctype, url=url)
1784 1783
1785 1784 # never return 0 here:
1786 1785 if newheads < oldheads:
1787 1786 return newheads - oldheads - 1
1788 1787 else:
1789 1788 return newheads - oldheads + 1
1790 1789
1791 1790
1792 1791 def stream_in(self, remote, requirements):
1793 1792 fp = remote.stream_out()
1794 1793 l = fp.readline()
1795 1794 try:
1796 1795 resp = int(l)
1797 1796 except ValueError:
1798 1797 raise error.ResponseError(
1799 1798 _('Unexpected response from remote server:'), l)
1800 1799 if resp == 1:
1801 1800 raise util.Abort(_('operation forbidden by server'))
1802 1801 elif resp == 2:
1803 1802 raise util.Abort(_('locking the remote repository failed'))
1804 1803 elif resp != 0:
1805 1804 raise util.Abort(_('the server sent an unknown error code'))
1806 1805 self.ui.status(_('streaming all changes\n'))
1807 1806 l = fp.readline()
1808 1807 try:
1809 1808 total_files, total_bytes = map(int, l.split(' ', 1))
1810 1809 except (ValueError, TypeError):
1811 1810 raise error.ResponseError(
1812 1811 _('Unexpected response from remote server:'), l)
1813 1812 self.ui.status(_('%d files to transfer, %s of data\n') %
1814 1813 (total_files, util.bytecount(total_bytes)))
1815 1814 start = time.time()
1816 1815 for i in xrange(total_files):
1817 1816 # XXX doesn't support '\n' or '\r' in filenames
1818 1817 l = fp.readline()
1819 1818 try:
1820 1819 name, size = l.split('\0', 1)
1821 1820 size = int(size)
1822 1821 except (ValueError, TypeError):
1823 1822 raise error.ResponseError(
1824 1823 _('Unexpected response from remote server:'), l)
1825 1824 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1826 1825 # for backwards compat, name was partially encoded
1827 1826 ofp = self.sopener(store.decodedir(name), 'w')
1828 1827 for chunk in util.filechunkiter(fp, limit=size):
1829 1828 ofp.write(chunk)
1830 1829 ofp.close()
1831 1830 elapsed = time.time() - start
1832 1831 if elapsed <= 0:
1833 1832 elapsed = 0.001
1834 1833 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1835 1834 (util.bytecount(total_bytes), elapsed,
1836 1835 util.bytecount(total_bytes / elapsed)))
1837 1836
1838 1837 # new requirements = old non-format requirements + new format-related
1839 1838 # requirements from the streamed-in repository
1840 1839 requirements.update(set(self.requirements) - self.supportedformats)
1841 1840 self._applyrequirements(requirements)
1842 1841 self._writerequirements()
1843 1842
1844 1843 self.invalidate()
1845 1844 return len(self.heads()) + 1
1846 1845
1847 1846 def clone(self, remote, heads=[], stream=False):
1848 1847 '''clone remote repository.
1849 1848
1850 1849 keyword arguments:
1851 1850 heads: list of revs to clone (forces use of pull)
1852 1851 stream: use streaming clone if possible'''
1853 1852
1854 1853 # now, all clients that can request uncompressed clones can
1855 1854 # read repo formats supported by all servers that can serve
1856 1855 # them.
1857 1856
1858 1857 # if revlog format changes, client will have to check version
1859 1858 # and format flags on "stream" capability, and use
1860 1859 # uncompressed only if compatible.
1861 1860
1862 1861 if stream and not heads:
1863 1862 # 'stream' means remote revlog format is revlogv1 only
1864 1863 if remote.capable('stream'):
1865 1864 return self.stream_in(remote, set(('revlogv1',)))
1866 1865 # otherwise, 'streamreqs' contains the remote revlog format
1867 1866 streamreqs = remote.capable('streamreqs')
1868 1867 if streamreqs:
1869 1868 streamreqs = set(streamreqs.split(','))
1870 1869 # if we support it, stream in and adjust our requirements
1871 1870 if not streamreqs - self.supportedformats:
1872 1871 return self.stream_in(remote, streamreqs)
1873 1872 return self.pull(remote, heads)
1874 1873
1875 1874 def pushkey(self, namespace, key, old, new):
1876 1875 return pushkey.push(self, namespace, key, old, new)
1877 1876
1878 1877 def listkeys(self, namespace):
1879 1878 return pushkey.list(self, namespace)
1880 1879
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that renames each (src, dest) pair in *files*.

    The pair list is snapshotted (as tuples) at call time so later
    mutation of *files* does not affect the returned callable.
    """
    pending = [tuple(entry) for entry in files]
    def renameall():
        for source, target in pending:
            util.rename(source, target)
    return renameall
1888 1887
def instance(ui, path, create):
    """Open (or create) a localrepository at *path*, stripping any
    leading 'file' scheme from the path first."""
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
1891 1890
def islocal(path):
    """Local repositories are, by definition, always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now