localrepo: check nested repos against working directory...
Martin Geisler
r12174:7bccd042 default
@@ -1,1864 +1,1864 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supported = set('revlogv1 store fncache shared parentdelta'.split())
25 25
26 26 def __init__(self, baseui, path=None, create=0):
27 27 repo.repository.__init__(self)
28 28 self.root = os.path.realpath(util.expandpath(path))
29 29 self.path = os.path.join(self.root, ".hg")
30 30 self.origroot = path
31 31 self.auditor = util.path_auditor(self.root, self._checknested)
32 32 self.opener = util.opener(self.path)
33 33 self.wopener = util.opener(self.root)
34 34 self.baseui = baseui
35 35 self.ui = baseui.copy()
36 36
37 37 try:
38 38 self.ui.readconfig(self.join("hgrc"), self.root)
39 39 extensions.loadall(self.ui)
40 40 except IOError:
41 41 pass
42 42
43 43 if not os.path.isdir(self.path):
44 44 if create:
45 45 if not os.path.exists(path):
46 46 util.makedirs(path)
47 47 os.mkdir(self.path)
48 48 requirements = ["revlogv1"]
49 49 if self.ui.configbool('format', 'usestore', True):
50 50 os.mkdir(os.path.join(self.path, "store"))
51 51 requirements.append("store")
52 52 if self.ui.configbool('format', 'usefncache', True):
53 53 requirements.append("fncache")
54 54 # create an invalid changelog
55 55 self.opener("00changelog.i", "a").write(
56 56 '\0\0\0\2' # represents revlogv2
57 57 ' dummy changelog to prevent using the old repo layout'
58 58 )
59 59 if self.ui.configbool('format', 'parentdelta', False):
60 60 requirements.append("parentdelta")
61 61 reqfile = self.opener("requires", "w")
62 62 for r in requirements:
63 63 reqfile.write("%s\n" % r)
64 64 reqfile.close()
65 65 else:
66 66 raise error.RepoError(_("repository %s not found") % path)
67 67 elif create:
68 68 raise error.RepoError(_("repository %s already exists") % path)
69 69 else:
70 70 # find requirements
71 71 requirements = set()
72 72 try:
73 73 requirements = set(self.opener("requires").read().splitlines())
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 for r in requirements - self.supported:
78 78 raise error.RepoError(_("requirement '%s' not supported") % r)
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener("sharedpath").read())
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self.sopener.options = {}
97 97 if 'parentdelta' in requirements:
98 98 self.sopener.options['parentdelta'] = 1
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None # in UTF-8
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _checknested(self, path):
116 116 """Determine if path is a legal nested repository."""
117 117 if not path.startswith(self.root):
118 118 return False
119 119 subpath = path[len(self.root) + 1:]
120 120
121 121 # XXX: Checking against the current working copy is wrong in
122 122 # the sense that it can reject things like
123 123 #
124 124 # $ hg cat -r 10 sub/x.txt
125 125 #
126 126 # if sub/ is no longer a subrepository in the working copy
127 127 # parent revision.
128 128 #
129 129 # However, it can of course also allow things that would have
130 130 # been rejected before, such as the above cat command if sub/
131 131 # is a subrepository now, but was a normal directory before.
132 132 # The old path auditor would have rejected it by mistake since it
133 133 # panics when it sees sub/.hg/.
134 134 #
135 - # All in all, checking against the working copy parent
136 - # revision seems sensible since we want to prevent access to
137 - # nested repositories on the filesystem *now*.
138 - ctx = self['.']
135 + # All in all, checking against the working copy seems sensible
136 + # since we want to prevent access to nested repositories on
137 + # the filesystem *now*.
138 + ctx = self[None]
139 139 parts = util.splitpath(subpath)
140 140 while parts:
141 141 prefix = os.sep.join(parts)
142 142 if prefix in ctx.substate:
143 143 if prefix == subpath:
144 144 return True
145 145 else:
146 146 sub = ctx.sub(prefix)
147 147 return sub.checknested(subpath[len(prefix) + 1:])
148 148 else:
149 149 parts.pop()
150 150 return False
151 151
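# A usage sketch (hypothetical layout): given a subrepository recorded at
# "sub" in the working directory's substate, _checknested() accepts paths
# inside it and rejects other nested repositories:
#
#   >>> repo._checknested(os.path.join(repo.root, 'sub'))      # True
#   >>> repo._checknested(os.path.join(repo.root, 'vendor'))   # False
#
# The path auditor installed in __init__ calls back into this method, so
# a False here makes the auditor refuse the path.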
152 152
153 153 @propertycache
154 154 def changelog(self):
155 155 c = changelog.changelog(self.sopener)
156 156 if 'HG_PENDING' in os.environ:
157 157 p = os.environ['HG_PENDING']
158 158 if p.startswith(self.root):
159 159 c.readpending('00changelog.i.a')
160 160 self.sopener.options['defversion'] = c.version
161 161 return c
162 162
163 163 @propertycache
164 164 def manifest(self):
165 165 return manifest.manifest(self.sopener)
166 166
167 167 @propertycache
168 168 def dirstate(self):
169 169 return dirstate.dirstate(self.opener, self.ui, self.root)
170 170
171 171 def __getitem__(self, changeid):
172 172 if changeid is None:
173 173 return context.workingctx(self)
174 174 return context.changectx(self, changeid)
175 175
176 176 def __contains__(self, changeid):
177 177 try:
178 178 return bool(self.lookup(changeid))
179 179 except error.RepoLookupError:
180 180 return False
181 181
182 182 def __nonzero__(self):
183 183 return True
184 184
185 185 def __len__(self):
186 186 return len(self.changelog)
187 187
188 188 def __iter__(self):
189 189 for i in xrange(len(self)):
190 190 yield i
191 191
192 192 def url(self):
193 193 return 'file:' + self.root
194 194
195 195 def hook(self, name, throw=False, **args):
196 196 return hook.hook(self.ui, self, name, throw, **args)
197 197
198 198 tag_disallowed = ':\r\n'
199 199
200 200 def _tag(self, names, node, message, local, user, date, extra={}):
201 201 if isinstance(names, str):
202 202 allchars = names
203 203 names = (names,)
204 204 else:
205 205 allchars = ''.join(names)
206 206 for c in self.tag_disallowed:
207 207 if c in allchars:
208 208 raise util.Abort(_('%r cannot be used in a tag name') % c)
209 209
210 210 branches = self.branchmap()
211 211 for name in names:
212 212 self.hook('pretag', throw=True, node=hex(node), tag=name,
213 213 local=local)
214 214 if name in branches:
215 215 self.ui.warn(_("warning: tag %s conflicts with existing"
216 216 " branch name\n") % name)
217 217
218 218 def writetags(fp, names, munge, prevtags):
219 219 fp.seek(0, 2)
220 220 if prevtags and prevtags[-1] != '\n':
221 221 fp.write('\n')
222 222 for name in names:
223 223 m = munge and munge(name) or name
224 224 if self._tagtypes and name in self._tagtypes:
225 225 old = self._tags.get(name, nullid)
226 226 fp.write('%s %s\n' % (hex(old), m))
227 227 fp.write('%s %s\n' % (hex(node), m))
228 228 fp.close()
229 229
230 230 prevtags = ''
231 231 if local:
232 232 try:
233 233 fp = self.opener('localtags', 'r+')
234 234 except IOError:
235 235 fp = self.opener('localtags', 'a')
236 236 else:
237 237 prevtags = fp.read()
238 238
239 239 # local tags are stored in the current charset
240 240 writetags(fp, names, None, prevtags)
241 241 for name in names:
242 242 self.hook('tag', node=hex(node), tag=name, local=local)
243 243 return
244 244
245 245 try:
246 246 fp = self.wfile('.hgtags', 'rb+')
247 247 except IOError:
248 248 fp = self.wfile('.hgtags', 'ab')
249 249 else:
250 250 prevtags = fp.read()
251 251
252 252 # committed tags are stored in UTF-8
253 253 writetags(fp, names, encoding.fromlocal, prevtags)
254 254
255 255 if '.hgtags' not in self.dirstate:
256 256 self[None].add(['.hgtags'])
257 257
258 258 m = matchmod.exact(self.root, '', ['.hgtags'])
259 259 tagnode = self.commit(message, user, date, extra=extra, match=m)
260 260
261 261 for name in names:
262 262 self.hook('tag', node=hex(node), tag=name, local=local)
263 263
264 264 return tagnode
265 265
266 266 def tag(self, names, node, message, local, user, date):
267 267 '''tag a revision with one or more symbolic names.
268 268
269 269 names is a list of strings or, when adding a single tag, names may be a
270 270 string.
271 271
272 272 if local is True, the tags are stored in a per-repository file.
273 273 otherwise, they are stored in the .hgtags file, and a new
274 274 changeset is committed with the change.
275 275
276 276 keyword arguments:
277 277
278 278 local: whether to store tags in non-version-controlled file
279 279 (default False)
280 280
281 281 message: commit message to use if committing
282 282
283 283 user: name of user to use if committing
284 284
285 285 date: date tuple to use if committing'''
286 286
287 287 for x in self.status()[:5]:
288 288 if '.hgtags' in x:
289 289 raise util.Abort(_('working copy of .hgtags is changed '
290 290 '(please commit .hgtags manually)'))
291 291
292 292 self.tags() # instantiate the cache
293 293 self._tag(names, node, message, local, user, date)
294 294
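# Usage sketch for tag(), with hypothetical values; the node normally
# comes from lookup():
#
#   >>> node = repo.lookup('tip')
#   >>> repo.tag('v1.0', node, 'Added tag v1.0 for changeset %s' % short(node),
#   ...          local=False, user='user@example.com', date=None)
#
# With local=False this commits a changeset updating .hgtags; with
# local=True it only appends to .hg/localtags and commits nothing.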
295 295 def tags(self):
296 296 '''return a mapping of tag to node'''
297 297 if self._tags is None:
298 298 (self._tags, self._tagtypes) = self._findtags()
299 299
300 300 return self._tags
301 301
302 302 def _findtags(self):
303 303 '''Do the hard work of finding tags. Return a pair of dicts
304 304 (tags, tagtypes) where tags maps tag name to node, and tagtypes
305 305 maps tag name to a string like \'global\' or \'local\'.
306 306 Subclasses or extensions are free to add their own tags, but
307 307 should be aware that the returned dicts will be retained for the
308 308 duration of the localrepo object.'''
309 309
310 310 # XXX what tagtype should subclasses/extensions use? Currently
311 311 # mq and bookmarks add tags, but do not set the tagtype at all.
312 312 # Should each extension invent its own tag type? Should there
313 313 # be one tagtype for all such "virtual" tags? Or is the status
314 314 # quo fine?
315 315
316 316 alltags = {} # map tag name to (node, hist)
317 317 tagtypes = {}
318 318
319 319 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
320 320 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
321 321
322 322 # Build the return dicts. Have to re-encode tag names because
323 323 # the tags module always uses UTF-8 (in order not to lose info
324 324 # writing to the cache), but the rest of Mercurial wants them in
325 325 # local encoding.
326 326 tags = {}
327 327 for (name, (node, hist)) in alltags.iteritems():
328 328 if node != nullid:
329 329 tags[encoding.tolocal(name)] = node
330 330 tags['tip'] = self.changelog.tip()
331 331 tagtypes = dict([(encoding.tolocal(name), value)
332 332 for (name, value) in tagtypes.iteritems()])
333 333 return (tags, tagtypes)
334 334
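# Sketch of the pair returned above, with hypothetical tag names and
# node values elided:
#
#   tags     = {'tip': <node>, 'v1.0': <node>, 'scratch': <node>}
#   tagtypes = {'v1.0': 'global', 'scratch': 'local'}
#
# Note that 'tip' is injected directly and never appears in tagtypes.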
335 335 def tagtype(self, tagname):
336 336 '''
337 337 return the type of the given tag. result can be:
338 338
339 339 'local' : a local tag
340 340 'global' : a global tag
341 341 None : tag does not exist
342 342 '''
343 343
344 344 self.tags()
345 345
346 346 return self._tagtypes.get(tagname)
347 347
348 348 def tagslist(self):
349 349 '''return a list of tags ordered by revision'''
350 350 l = []
351 351 for t, n in self.tags().iteritems():
352 352 try:
353 353 r = self.changelog.rev(n)
354 354 except:
355 355 r = -2 # sort to the beginning of the list if unknown
356 356 l.append((r, t, n))
357 357 return [(t, n) for r, t, n in sorted(l)]
358 358
359 359 def nodetags(self, node):
360 360 '''return the tags associated with a node'''
361 361 if not self.nodetagscache:
362 362 self.nodetagscache = {}
363 363 for t, n in self.tags().iteritems():
364 364 self.nodetagscache.setdefault(n, []).append(t)
365 365 for tags in self.nodetagscache.itervalues():
366 366 tags.sort()
367 367 return self.nodetagscache.get(node, [])
368 368
369 369 def _branchtags(self, partial, lrev):
370 370 # TODO: rename this function?
371 371 tiprev = len(self) - 1
372 372 if lrev != tiprev:
373 373 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
374 374 self._updatebranchcache(partial, ctxgen)
375 375 self._writebranchcache(partial, self.changelog.tip(), tiprev)
376 376
377 377 return partial
378 378
379 379 def updatebranchcache(self):
380 380 tip = self.changelog.tip()
381 381 if self._branchcache is not None and self._branchcachetip == tip:
382 382 return self._branchcache
383 383
384 384 oldtip = self._branchcachetip
385 385 self._branchcachetip = tip
386 386 if oldtip is None or oldtip not in self.changelog.nodemap:
387 387 partial, last, lrev = self._readbranchcache()
388 388 else:
389 389 lrev = self.changelog.rev(oldtip)
390 390 partial = self._branchcache
391 391
392 392 self._branchtags(partial, lrev)
393 393 # this private cache holds all heads (not just tips)
394 394 self._branchcache = partial
395 395
396 396 def branchmap(self):
397 397 '''returns a dictionary {branch: [branchheads]}'''
398 398 self.updatebranchcache()
399 399 return self._branchcache
400 400
401 401 def branchtags(self):
402 402 '''return a dict where branch names map to the tipmost head of
403 403 the branch, open heads come before closed'''
404 404 bt = {}
405 405 for bn, heads in self.branchmap().iteritems():
406 406 tip = heads[-1]
407 407 for h in reversed(heads):
408 408 if 'close' not in self.changelog.read(h)[5]:
409 409 tip = h
410 410 break
411 411 bt[bn] = tip
412 412 return bt
413 413
414 414
415 415 def _readbranchcache(self):
416 416 partial = {}
417 417 try:
418 418 f = self.opener("branchheads.cache")
419 419 lines = f.read().split('\n')
420 420 f.close()
421 421 except (IOError, OSError):
422 422 return {}, nullid, nullrev
423 423
424 424 try:
425 425 last, lrev = lines.pop(0).split(" ", 1)
426 426 last, lrev = bin(last), int(lrev)
427 427 if lrev >= len(self) or self[lrev].node() != last:
428 428 # invalidate the cache
429 429 raise ValueError('invalidating branch cache (tip differs)')
430 430 for l in lines:
431 431 if not l:
432 432 continue
433 433 node, label = l.split(" ", 1)
434 434 partial.setdefault(label.strip(), []).append(bin(node))
435 435 except KeyboardInterrupt:
436 436 raise
437 437 except Exception, inst:
438 438 if self.ui.debugflag:
439 439 self.ui.warn(str(inst), '\n')
440 440 partial, last, lrev = {}, nullid, nullrev
441 441 return partial, last, lrev
442 442
443 443 def _writebranchcache(self, branches, tip, tiprev):
444 444 try:
445 445 f = self.opener("branchheads.cache", "w", atomictemp=True)
446 446 f.write("%s %s\n" % (hex(tip), tiprev))
447 447 for label, nodes in branches.iteritems():
448 448 for node in nodes:
449 449 f.write("%s %s\n" % (hex(node), label))
450 450 f.rename()
451 451 except (IOError, OSError):
452 452 pass
453 453
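# Layout of the branchheads.cache file written above: one header line
# with the cached tip, then one "<hex node> <branch label>" line per
# head (labels and nodes here are hypothetical):
#
#   0123abcd...beef 1863
#   4567cdef...0011 default
#   89ab0123...2233 stable
#
# _readbranchcache() parses exactly this layout and throws the cache
# away when the recorded tip no longer matches the changelog.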
454 454 def _updatebranchcache(self, partial, ctxgen):
455 455 # collect new branch entries
456 456 newbranches = {}
457 457 for c in ctxgen:
458 458 newbranches.setdefault(c.branch(), []).append(c.node())
459 459 # if older branchheads are reachable from new ones, they aren't
460 460 # really branchheads. Note checking parents is insufficient:
461 461 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
462 462 for branch, newnodes in newbranches.iteritems():
463 463 bheads = partial.setdefault(branch, [])
464 464 bheads.extend(newnodes)
465 465 if len(bheads) <= 1:
466 466 continue
467 467 # starting from tip means fewer passes over reachable
468 468 while newnodes:
469 469 latest = newnodes.pop()
470 470 if latest not in bheads:
471 471 continue
472 472 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
473 473 reachable = self.changelog.reachable(latest, minbhrev)
474 474 reachable.remove(latest)
475 475 bheads = [b for b in bheads if b not in reachable]
476 476 partial[branch] = bheads
477 477
478 478 def lookup(self, key):
479 479 if isinstance(key, int):
480 480 return self.changelog.node(key)
481 481 elif key == '.':
482 482 return self.dirstate.parents()[0]
483 483 elif key == 'null':
484 484 return nullid
485 485 elif key == 'tip':
486 486 return self.changelog.tip()
487 487 n = self.changelog._match(key)
488 488 if n:
489 489 return n
490 490 if key in self.tags():
491 491 return self.tags()[key]
492 492 if key in self.branchtags():
493 493 return self.branchtags()[key]
494 494 n = self.changelog._partialmatch(key)
495 495 if n:
496 496 return n
497 497
498 498 # can't find key, check if it might have come from damaged dirstate
499 499 if key in self.dirstate.parents():
500 500 raise error.Abort(_("working directory has unknown parent '%s'!")
501 501 % short(key))
502 502 try:
503 503 if len(key) == 20:
504 504 key = hex(key)
505 505 except:
506 506 pass
507 507 raise error.RepoLookupError(_("unknown revision '%s'") % key)
508 508
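# lookup() tries keys in the order implemented above: integer revision,
# '.', 'null', 'tip', an exact node or full hex string, tag names,
# branch names, then an unambiguous hex prefix. For example:
#
#   >>> repo.lookup(0)          # revision number
#   >>> repo.lookup('tip')      # symbolic name
#   >>> repo.lookup('default')  # branch name -> tipmost open head
#
# Anything that cannot be resolved raises error.RepoLookupError.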
509 509 def lookupbranch(self, key, remote=None):
510 510 repo = remote or self
511 511 if key in repo.branchmap():
512 512 return key
513 513
514 514 repo = (remote and remote.local()) and remote or self
515 515 return repo[key].branch()
516 516
517 517 def local(self):
518 518 return True
519 519
520 520 def join(self, f):
521 521 return os.path.join(self.path, f)
522 522
523 523 def wjoin(self, f):
524 524 return os.path.join(self.root, f)
525 525
526 526 def file(self, f):
527 527 if f[0] == '/':
528 528 f = f[1:]
529 529 return filelog.filelog(self.sopener, f)
530 530
531 531 def changectx(self, changeid):
532 532 return self[changeid]
533 533
534 534 def parents(self, changeid=None):
535 535 '''get list of changectxs for parents of changeid'''
536 536 return self[changeid].parents()
537 537
538 538 def filectx(self, path, changeid=None, fileid=None):
539 539 """changeid can be a changeset revision, node, or tag.
540 540 fileid can be a file revision or node."""
541 541 return context.filectx(self, path, changeid, fileid)
542 542
543 543 def getcwd(self):
544 544 return self.dirstate.getcwd()
545 545
546 546 def pathto(self, f, cwd=None):
547 547 return self.dirstate.pathto(f, cwd)
548 548
549 549 def wfile(self, f, mode='r'):
550 550 return self.wopener(f, mode)
551 551
552 552 def _link(self, f):
553 553 return os.path.islink(self.wjoin(f))
554 554
555 555 def _loadfilter(self, filter):
556 556 if filter not in self.filterpats:
557 557 l = []
558 558 for pat, cmd in self.ui.configitems(filter):
559 559 if cmd == '!':
560 560 continue
561 561 mf = matchmod.match(self.root, '', [pat])
562 562 fn = None
563 563 params = cmd
564 564 for name, filterfn in self._datafilters.iteritems():
565 565 if cmd.startswith(name):
566 566 fn = filterfn
567 567 params = cmd[len(name):].lstrip()
568 568 break
569 569 if not fn:
570 570 fn = lambda s, c, **kwargs: util.filter(s, c)
571 571 # Wrap old filters not supporting keyword arguments
572 572 if not inspect.getargspec(fn)[2]:
573 573 oldfn = fn
574 574 fn = lambda s, c, **kwargs: oldfn(s, c)
575 575 l.append((mf, fn, params))
576 576 self.filterpats[filter] = l
577 577
578 578 def _filter(self, filter, filename, data):
579 579 self._loadfilter(filter)
580 580
581 581 for mf, fn, cmd in self.filterpats[filter]:
582 582 if mf(filename):
583 583 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
584 584 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
585 585 break
586 586
587 587 return data
588 588
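# The patterns loaded above come from the [encode] and [decode] hgrc
# sections; wread() applies the "encode" chain and wwrite() the
# "decode" chain. A hypothetical configuration sketch:
#
#   [encode]
#   *.txt = unix2dos
#
# where the command filters matching file contents; a value of '!'
# (skipped by the loop above) disables a pattern, and a command may
# instead name a filter registered via adddatafilter().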
589 589 def adddatafilter(self, name, filter):
590 590 self._datafilters[name] = filter
591 591
592 592 def wread(self, filename):
593 593 if self._link(filename):
594 594 data = os.readlink(self.wjoin(filename))
595 595 else:
596 596 data = self.wopener(filename, 'r').read()
597 597 return self._filter("encode", filename, data)
598 598
599 599 def wwrite(self, filename, data, flags):
600 600 data = self._filter("decode", filename, data)
601 601 try:
602 602 os.unlink(self.wjoin(filename))
603 603 except OSError:
604 604 pass
605 605 if 'l' in flags:
606 606 self.wopener.symlink(data, filename)
607 607 else:
608 608 self.wopener(filename, 'w').write(data)
609 609 if 'x' in flags:
610 610 util.set_flags(self.wjoin(filename), False, True)
611 611
612 612 def wwritedata(self, filename, data):
613 613 return self._filter("decode", filename, data)
614 614
615 615 def transaction(self, desc):
616 616 tr = self._transref and self._transref() or None
617 617 if tr and tr.running():
618 618 return tr.nest()
619 619
620 620 # abort here if the journal already exists
621 621 if os.path.exists(self.sjoin("journal")):
622 622 raise error.RepoError(
623 623 _("abandoned transaction found - run hg recover"))
624 624
625 625 # save dirstate for rollback
626 626 try:
627 627 ds = self.opener("dirstate").read()
628 628 except IOError:
629 629 ds = ""
630 630 self.opener("journal.dirstate", "w").write(ds)
631 631 self.opener("journal.branch", "w").write(self.dirstate.branch())
632 632 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
633 633
634 634 renames = [(self.sjoin("journal"), self.sjoin("undo")),
635 635 (self.join("journal.dirstate"), self.join("undo.dirstate")),
636 636 (self.join("journal.branch"), self.join("undo.branch")),
637 637 (self.join("journal.desc"), self.join("undo.desc"))]
638 638 tr = transaction.transaction(self.ui.warn, self.sopener,
639 639 self.sjoin("journal"),
640 640 aftertrans(renames),
641 641 self.store.createmode)
642 642 self._transref = weakref.ref(tr)
643 643 return tr
644 644
645 645 def recover(self):
646 646 lock = self.lock()
647 647 try:
648 648 if os.path.exists(self.sjoin("journal")):
649 649 self.ui.status(_("rolling back interrupted transaction\n"))
650 650 transaction.rollback(self.sopener, self.sjoin("journal"),
651 651 self.ui.warn)
652 652 self.invalidate()
653 653 return True
654 654 else:
655 655 self.ui.warn(_("no interrupted transaction available\n"))
656 656 return False
657 657 finally:
658 658 lock.release()
659 659
660 660 def rollback(self, dryrun=False):
661 661 wlock = lock = None
662 662 try:
663 663 wlock = self.wlock()
664 664 lock = self.lock()
665 665 if os.path.exists(self.sjoin("undo")):
666 666 try:
667 667 args = self.opener("undo.desc", "r").read().splitlines()
668 668 if len(args) >= 3 and self.ui.verbose:
669 669 desc = _("rolling back to revision %s"
670 670 " (undo %s: %s)\n") % (
671 671 int(args[0]) - 1, args[1], args[2])
672 672 elif len(args) >= 2:
673 673 desc = _("rolling back to revision %s (undo %s)\n") % (
674 674 int(args[0]) - 1, args[1])
675 675 except IOError:
676 676 desc = _("rolling back unknown transaction\n")
677 677 self.ui.status(desc)
678 678 if dryrun:
679 679 return
680 680 transaction.rollback(self.sopener, self.sjoin("undo"),
681 681 self.ui.warn)
682 682 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
683 683 try:
684 684 branch = self.opener("undo.branch").read()
685 685 self.dirstate.setbranch(branch)
686 686 except IOError:
687 687 self.ui.warn(_("Named branch could not be reset, "
688 688 "current branch still is: %s\n")
689 689 % encoding.tolocal(self.dirstate.branch()))
690 690 self.invalidate()
691 691 self.dirstate.invalidate()
692 692 self.destroyed()
693 693 else:
694 694 self.ui.warn(_("no rollback information available\n"))
695 695 return 1
696 696 finally:
697 697 release(lock, wlock)
698 698
699 699 def invalidatecaches(self):
700 700 self._tags = None
701 701 self._tagtypes = None
702 702 self.nodetagscache = None
703 703 self._branchcache = None # in UTF-8
704 704 self._branchcachetip = None
705 705
706 706 def invalidate(self):
707 707 for a in "changelog manifest".split():
708 708 if a in self.__dict__:
709 709 delattr(self, a)
710 710 self.invalidatecaches()
711 711
712 712 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
713 713 try:
714 714 l = lock.lock(lockname, 0, releasefn, desc=desc)
715 715 except error.LockHeld, inst:
716 716 if not wait:
717 717 raise
718 718 self.ui.warn(_("waiting for lock on %s held by %r\n") %
719 719 (desc, inst.locker))
720 720 # default to 600 seconds timeout
721 721 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
722 722 releasefn, desc=desc)
723 723 if acquirefn:
724 724 acquirefn()
725 725 return l
726 726
727 727 def lock(self, wait=True):
728 728 '''Lock the repository store (.hg/store) and return a weak reference
729 729 to the lock. Use this before modifying the store (e.g. committing or
730 730 stripping). If you are opening a transaction, get a lock as well.'''
731 731 l = self._lockref and self._lockref()
732 732 if l is not None and l.held:
733 733 l.lock()
734 734 return l
735 735
736 736 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
737 737 _('repository %s') % self.origroot)
738 738 self._lockref = weakref.ref(l)
739 739 return l
740 740
741 741 def wlock(self, wait=True):
742 742 '''Lock the non-store parts of the repository (everything under
743 743 .hg except .hg/store) and return a weak reference to the lock.
744 744 Use this before modifying files in .hg.'''
745 745 l = self._wlockref and self._wlockref()
746 746 if l is not None and l.held:
747 747 l.lock()
748 748 return l
749 749
750 750 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
751 751 self.dirstate.invalidate, _('working directory of %s') %
752 752 self.origroot)
753 753 self._wlockref = weakref.ref(l)
754 754 return l
755 755
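# Locking sketch, following the pattern used by rollback() above: take
# the wlock before the store lock and release in reverse order:
#
#   wlock = lock = None
#   try:
#       wlock = repo.wlock()
#       lock = repo.lock()
#       # ... modify store and working directory state ...
#   finally:
#       release(lock, wlock)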
756 756 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
757 757 """
758 758 commit an individual file as part of a larger transaction
759 759 """
760 760
761 761 fname = fctx.path()
762 762 text = fctx.data()
763 763 flog = self.file(fname)
764 764 fparent1 = manifest1.get(fname, nullid)
765 765 fparent2 = fparent2o = manifest2.get(fname, nullid)
766 766
767 767 meta = {}
768 768 copy = fctx.renamed()
769 769 if copy and copy[0] != fname:
770 770 # Mark the new revision of this file as a copy of another
771 771 # file. This copy data will effectively act as a parent
772 772 # of this new revision. If this is a merge, the first
773 773 # parent will be the nullid (meaning "look up the copy data")
774 774 # and the second one will be the other parent. For example:
775 775 #
776 776 # 0 --- 1 --- 3 rev1 changes file foo
777 777 # \ / rev2 renames foo to bar and changes it
778 778 # \- 2 -/ rev3 should have bar with all changes and
779 779 # should record that bar descends from
780 780 # bar in rev2 and foo in rev1
781 781 #
782 782 # this allows this merge to succeed:
783 783 #
784 784 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
785 785 # \ / merging rev3 and rev4 should use bar@rev2
786 786 # \- 2 --- 4 as the merge base
787 787 #
788 788
789 789 cfname = copy[0]
790 790 crev = manifest1.get(cfname)
791 791 newfparent = fparent2
792 792
793 793 if manifest2: # branch merge
794 794 if fparent2 == nullid or crev is None: # copied on remote side
795 795 if cfname in manifest2:
796 796 crev = manifest2[cfname]
797 797 newfparent = fparent1
798 798
799 799 # find source in nearest ancestor if we've lost track
800 800 if not crev:
801 801 self.ui.debug(" %s: searching for copy revision for %s\n" %
802 802 (fname, cfname))
803 803 for ancestor in self['.'].ancestors():
804 804 if cfname in ancestor:
805 805 crev = ancestor[cfname].filenode()
806 806 break
807 807
808 808 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
809 809 meta["copy"] = cfname
810 810 meta["copyrev"] = hex(crev)
811 811 fparent1, fparent2 = nullid, newfparent
812 812 elif fparent2 != nullid:
813 813 # is one parent an ancestor of the other?
814 814 fparentancestor = flog.ancestor(fparent1, fparent2)
815 815 if fparentancestor == fparent1:
816 816 fparent1, fparent2 = fparent2, nullid
817 817 elif fparentancestor == fparent2:
818 818 fparent2 = nullid
819 819
820 820 # is the file changed?
821 821 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
822 822 changelist.append(fname)
823 823 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
824 824
825 825 # are just the flags changed during merge?
826 826 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
827 827 changelist.append(fname)
828 828
829 829 return fparent1
830 830
831 831 def commit(self, text="", user=None, date=None, match=None, force=False,
832 832 editor=False, extra={}):
833 833 """Add a new revision to current repository.
834 834
835 835 Revision information is gathered from the working directory,
836 836 match can be used to filter the committed files. If editor is
837 837 supplied, it is called to get a commit message.
838 838 """
839 839
840 840 def fail(f, msg):
841 841 raise util.Abort('%s: %s' % (f, msg))
842 842
843 843 if not match:
844 844 match = matchmod.always(self.root, '')
845 845
846 846 if not force:
847 847 vdirs = []
848 848 match.dir = vdirs.append
849 849 match.bad = fail
850 850
851 851 wlock = self.wlock()
852 852 try:
853 853 wctx = self[None]
854 854 merge = len(wctx.parents()) > 1
855 855
856 856 if (not force and merge and match and
857 857 (match.files() or match.anypats())):
858 858 raise util.Abort(_('cannot partially commit a merge '
859 859 '(do not specify files or patterns)'))
860 860
861 861 changes = self.status(match=match, clean=force)
862 862 if force:
863 863 changes[0].extend(changes[6]) # mq may commit unchanged files
864 864
865 865 # check subrepos
866 866 subs = []
867 867 removedsubs = set()
868 868 for p in wctx.parents():
869 869 removedsubs.update(s for s in p.substate if match(s))
870 870 for s in wctx.substate:
871 871 removedsubs.discard(s)
872 872 if match(s) and wctx.sub(s).dirty():
873 873 subs.append(s)
874 874 if (subs or removedsubs):
875 875 if (not match('.hgsub') and
876 876 '.hgsub' in (wctx.modified() + wctx.added())):
877 877 raise util.Abort(_("can't commit subrepos without .hgsub"))
878 878 if '.hgsubstate' not in changes[0]:
879 879 changes[0].insert(0, '.hgsubstate')
880 880
881 881 # make sure all explicit patterns are matched
882 882 if not force and match.files():
883 883 matched = set(changes[0] + changes[1] + changes[2])
884 884
885 885 for f in match.files():
886 886 if f == '.' or f in matched or f in wctx.substate:
887 887 continue
888 888 if f in changes[3]: # missing
889 889 fail(f, _('file not found!'))
890 890 if f in vdirs: # visited directory
891 891 d = f + '/'
892 892 for mf in matched:
893 893 if mf.startswith(d):
894 894 break
895 895 else:
896 896 fail(f, _("no match under directory!"))
897 897 elif f not in self.dirstate:
898 898 fail(f, _("file not tracked!"))
899 899
900 900 if (not force and not extra.get("close") and not merge
901 901 and not (changes[0] or changes[1] or changes[2])
902 902 and wctx.branch() == wctx.p1().branch()):
903 903 return None
904 904
905 905 ms = mergemod.mergestate(self)
906 906 for f in changes[0]:
907 907 if f in ms and ms[f] == 'u':
908 908 raise util.Abort(_("unresolved merge conflicts "
909 909 "(see hg resolve)"))
910 910
911 911 cctx = context.workingctx(self, text, user, date, extra, changes)
912 912 if editor:
913 913 cctx._text = editor(self, cctx, subs)
914 914 edited = (text != cctx._text)
915 915
916 916 # commit subs
917 917 if subs or removedsubs:
918 918 state = wctx.substate.copy()
919 919 for s in sorted(subs):
920 920 sub = wctx.sub(s)
921 921 self.ui.status(_('committing subrepository %s\n') %
922 922 subrepo.relpath(sub))
923 923 sr = sub.commit(cctx._text, user, date)
924 924 state[s] = (state[s][0], sr)
925 925 subrepo.writestate(self, state)
926 926
927 927 # Save commit message in case this transaction gets rolled back
928 928 # (e.g. by a pretxncommit hook). Leave the content alone on
929 929 # the assumption that the user will use the same editor again.
930 930 msgfile = self.opener('last-message.txt', 'wb')
931 931 msgfile.write(cctx._text)
932 932 msgfile.close()
933 933
934 934 p1, p2 = self.dirstate.parents()
935 935 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
936 936 try:
937 937 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
938 938 ret = self.commitctx(cctx, True)
939 939 except:
940 940 if edited:
941 941 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
942 942 self.ui.write(
943 943 _('note: commit message saved in %s\n') % msgfn)
944 944 raise
945 945
946 946 # update dirstate and mergestate
947 947 for f in changes[0] + changes[1]:
948 948 self.dirstate.normal(f)
949 949 for f in changes[2]:
950 950 self.dirstate.forget(f)
951 951 self.dirstate.setparents(ret)
952 952 ms.reset()
953 953 finally:
954 954 wlock.release()
955 955
956 956 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
957 957 return ret
958 958
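# Commit sketch: restrict a commit to explicit files with a matcher,
# the same way _tag() above commits only .hgtags ('somefile' is a
# hypothetical path):
#
#   m = matchmod.exact(repo.root, '', ['somefile'])
#   node = repo.commit('message', user='user@example.com', match=m)
#
# commit() returns None when there is nothing to commit (no changes and
# the branch is unchanged).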
959 959 def commitctx(self, ctx, error=False):
960 960 """Add a new revision to current repository.
961 961 Revision information is passed via the context argument.
962 962 """
963 963
964 964 tr = lock = None
965 965 removed = ctx.removed()
966 966 p1, p2 = ctx.p1(), ctx.p2()
967 967 m1 = p1.manifest().copy()
968 968 m2 = p2.manifest()
969 969 user = ctx.user()
970 970
971 971 lock = self.lock()
972 972 try:
973 973 tr = self.transaction("commit")
974 974 trp = weakref.proxy(tr)
975 975
976 976 # check in files
977 977 new = {}
978 978 changed = []
979 979 linkrev = len(self)
980 980 for f in sorted(ctx.modified() + ctx.added()):
981 981 self.ui.note(f + "\n")
982 982 try:
983 983 fctx = ctx[f]
984 984 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
985 985 changed)
986 986 m1.set(f, fctx.flags())
987 987 except OSError, inst:
988 988 self.ui.warn(_("trouble committing %s!\n") % f)
989 989 raise
990 990 except IOError, inst:
991 991 errcode = getattr(inst, 'errno', errno.ENOENT)
992 992 if error or errcode and errcode != errno.ENOENT:
993 993 self.ui.warn(_("trouble committing %s!\n") % f)
994 994 raise
995 995 else:
996 996 removed.append(f)
997 997
998 998 # update manifest
999 999 m1.update(new)
1000 1000 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1001 1001 drop = [f for f in removed if f in m1]
1002 1002 for f in drop:
1003 1003 del m1[f]
1004 1004 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1005 1005 p2.manifestnode(), (new, drop))
1006 1006
1007 1007 # update changelog
1008 1008 self.changelog.delayupdate()
1009 1009 n = self.changelog.add(mn, changed + removed, ctx.description(),
1010 1010 trp, p1.node(), p2.node(),
1011 1011 user, ctx.date(), ctx.extra().copy())
1012 1012 p = lambda: self.changelog.writepending() and self.root or ""
1013 1013 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1014 1014 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1015 1015 parent2=xp2, pending=p)
1016 1016 self.changelog.finalize(trp)
1017 1017 tr.close()
1018 1018
1019 1019 if self._branchcache:
1020 1020 self.updatebranchcache()
1021 1021 return n
1022 1022 finally:
1023 1023 if tr:
1024 1024 tr.release()
1025 1025 lock.release()
1026 1026
1027 1027 def destroyed(self):
1028 1028 '''Inform the repository that nodes have been destroyed.
1029 1029 Intended for use by strip and rollback, so there's a common
1030 1030 place for anything that has to be done after destroying history.'''
1031 1031 # XXX it might be nice if we could take the list of destroyed
1032 1032 # nodes, but I don't see an easy way for rollback() to do that
1033 1033
1034 1034 # Ensure the persistent tag cache is updated. Doing it now
1035 1035 # means that the tag cache only has to worry about destroyed
1036 1036 # heads immediately after a strip/rollback. That in turn
1037 1037 # guarantees that "cachetip == currenttip" (comparing both rev
1038 1038 # and node) always means no nodes have been added or destroyed.
1039 1039
1040 1040 # XXX this is suboptimal when qrefresh'ing: we strip the current
1041 1041 # head, refresh the tag cache, then immediately add a new head.
1042 1042 # But I think doing it this way is necessary for the "instant
1043 1043 # tag cache retrieval" case to work.
1044 1044 self.invalidatecaches()
1045 1045
1046 1046 def walk(self, match, node=None):
1047 1047 '''
1048 1048 walk recursively through the directory tree or a given
1049 1049 changeset, finding all files matched by the match
1050 1050 function
1051 1051 '''
1052 1052 return self[node].walk(match)
1053 1053
1054 1054 def status(self, node1='.', node2=None, match=None,
1055 1055 ignored=False, clean=False, unknown=False,
1056 1056 listsubrepos=False):
1057 1057 """return status of files between two nodes, or between a node and the working directory
1058 1058
1059 1059 If node1 is None, use the first dirstate parent instead.
1060 1060 If node2 is None, compare node1 with working directory.
1061 1061 """
1062 1062
1063 1063 def mfmatches(ctx):
1064 1064 mf = ctx.manifest().copy()
1065 1065 for fn in mf.keys():
1066 1066 if not match(fn):
1067 1067 del mf[fn]
1068 1068 return mf
1069 1069
1070 1070 if isinstance(node1, context.changectx):
1071 1071 ctx1 = node1
1072 1072 else:
1073 1073 ctx1 = self[node1]
1074 1074 if isinstance(node2, context.changectx):
1075 1075 ctx2 = node2
1076 1076 else:
1077 1077 ctx2 = self[node2]
1078 1078
1079 1079 working = ctx2.rev() is None
1080 1080 parentworking = working and ctx1 == self['.']
1081 1081 match = match or matchmod.always(self.root, self.getcwd())
1082 1082 listignored, listclean, listunknown = ignored, clean, unknown
1083 1083
1084 1084 # load earliest manifest first for caching reasons
1085 1085 if not working and ctx2.rev() < ctx1.rev():
1086 1086 ctx2.manifest()
1087 1087
1088 1088 if not parentworking:
1089 1089 def bad(f, msg):
1090 1090 if f not in ctx1:
1091 1091 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1092 1092 match.bad = bad
1093 1093
1094 1094 if working: # we need to scan the working dir
1095 1095 subrepos = []
1096 1096 if '.hgsub' in self.dirstate:
1097 1097 subrepos = ctx1.substate.keys()
1098 1098 s = self.dirstate.status(match, subrepos, listignored,
1099 1099 listclean, listunknown)
1100 1100 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1101 1101
1102 1102 # check for any possibly clean files
1103 1103 if parentworking and cmp:
1104 1104 fixup = []
1105 1105 # do a full compare of any files that might have changed
1106 1106 for f in sorted(cmp):
1107 1107 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1108 1108 or ctx1[f].cmp(ctx2[f])):
1109 1109 modified.append(f)
1110 1110 else:
1111 1111 fixup.append(f)
1112 1112
1113 1113 # update dirstate for files that are actually clean
1114 1114 if fixup:
1115 1115 if listclean:
1116 1116 clean += fixup
1117 1117
1118 1118 try:
1119 1119 # updating the dirstate is optional
1120 1120 # so we don't wait on the lock
1121 1121 wlock = self.wlock(False)
1122 1122 try:
1123 1123 for f in fixup:
1124 1124 self.dirstate.normal(f)
1125 1125 finally:
1126 1126 wlock.release()
1127 1127 except error.LockError:
1128 1128 pass
1129 1129
1130 1130 if not parentworking:
1131 1131 mf1 = mfmatches(ctx1)
1132 1132 if working:
1133 1133 # we are comparing working dir against non-parent
1134 1134 # generate a pseudo-manifest for the working dir
1135 1135 mf2 = mfmatches(self['.'])
1136 1136 for f in cmp + modified + added:
1137 1137 mf2[f] = None
1138 1138 mf2.set(f, ctx2.flags(f))
1139 1139 for f in removed:
1140 1140 if f in mf2:
1141 1141 del mf2[f]
1142 1142 else:
1143 1143 # we are comparing two revisions
1144 1144 deleted, unknown, ignored = [], [], []
1145 1145 mf2 = mfmatches(ctx2)
1146 1146
1147 1147 modified, added, clean = [], [], []
1148 1148 for fn in mf2:
1149 1149 if fn in mf1:
1150 1150 if (mf1.flags(fn) != mf2.flags(fn) or
1151 1151 (mf1[fn] != mf2[fn] and
1152 1152 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1153 1153 modified.append(fn)
1154 1154 elif listclean:
1155 1155 clean.append(fn)
1156 1156 del mf1[fn]
1157 1157 else:
1158 1158 added.append(fn)
1159 1159 removed = mf1.keys()
1160 1160
1161 1161 r = modified, added, removed, deleted, unknown, ignored, clean
1162 1162
1163 1163 if listsubrepos:
1164 1164 for subpath in ctx1.substate:
1165 1165 sub = ctx1.sub(subpath)
1166 1166 if working:
1167 1167 rev2 = None
1168 1168 else:
1169 1169 rev2 = ctx2.substate[subpath][1]
1170 1170 try:
1171 1171 submatch = matchmod.narrowmatcher(subpath, match)
1172 1172 s = sub.status(rev2, match=submatch, ignored=listignored,
1173 1173 clean=listclean, unknown=listunknown,
1174 1174 listsubrepos=True)
1175 1175 for rfiles, sfiles in zip(r, s):
1176 1176 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1177 1177 except error.LookupError:
1178 1178 self.ui.status(_("skipping missing subrepository: %s\n")
1179 1179 % subpath)
1180 1180
1181 1181 [l.sort() for l in r]
1182 1182 return r
1183 1183
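# The status tuple above always carries seven sorted lists, in this
# order:
#
#   (modified, added, removed, deleted,
#    unknown, ignored, clean) = repo.status(ignored=True, clean=True,
#                                           unknown=True)
#
# The unknown/ignored/clean lists stay empty unless requested through
# the corresponding keyword arguments.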
1184 1184 def heads(self, start=None):
1185 1185 heads = self.changelog.heads(start)
1186 1186 # sort the output in rev descending order
1187 1187 heads = [(-self.changelog.rev(h), h) for h in heads]
1188 1188 return [n for (r, n) in sorted(heads)]
1189 1189
1190 1190 def branchheads(self, branch=None, start=None, closed=False):
1191 1191 '''return a (possibly filtered) list of heads for the given branch
1192 1192
1193 1193 Heads are returned in topological order, from newest to oldest.
1194 1194 If branch is None, use the dirstate branch.
1195 1195 If start is not None, return only heads reachable from start.
1196 1196 If closed is True, return heads that are marked as closed as well.
1197 1197 '''
1198 1198 if branch is None:
1199 1199 branch = self[None].branch()
1200 1200 branches = self.branchmap()
1201 1201 if branch not in branches:
1202 1202 return []
1203 1203 # the cache returns heads ordered lowest to highest
1204 1204 bheads = list(reversed(branches[branch]))
1205 1205 if start is not None:
1206 1206 # filter out the heads that cannot be reached from startrev
1207 1207 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1208 1208 bheads = [h for h in bheads if h in fbheads]
1209 1209 if not closed:
1210 1210 bheads = [h for h in bheads if
1211 1211 ('close' not in self.changelog.read(h)[5])]
1212 1212 return bheads
1213 1213
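# branchheads() sketch ('stable' here is a hypothetical branch name):
#
#   heads = repo.branchheads()          # dirstate branch, open heads only
#   heads = repo.branchheads('stable', closed=True)
#
# An unknown branch name yields an empty list rather than an error, and
# heads arrive newest first.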
1214 1214 def branches(self, nodes):
1215 1215 if not nodes:
1216 1216 nodes = [self.changelog.tip()]
1217 1217 b = []
1218 1218 for n in nodes:
1219 1219 t = n
1220 1220 while 1:
1221 1221 p = self.changelog.parents(n)
1222 1222 if p[1] != nullid or p[0] == nullid:
1223 1223 b.append((t, n, p[0], p[1]))
1224 1224 break
1225 1225 n = p[0]
1226 1226 return b
1227 1227
1228 1228 def between(self, pairs):
1229 1229 r = []
1230 1230
1231 1231 for top, bottom in pairs:
1232 1232 n, l, i = top, [], 0
1233 1233 f = 1
1234 1234
1235 1235 while n != bottom and n != nullid:
1236 1236 p = self.changelog.parents(n)[0]
1237 1237 if i == f:
1238 1238 l.append(n)
1239 1239 f = f * 2
1240 1240 n = p
1241 1241 i += 1
1242 1242
1243 1243 r.append(l)
1244 1244
1245 1245 return r
1246 1246
1247 1247 def pull(self, remote, heads=None, force=False):
1248 1248 lock = self.lock()
1249 1249 try:
1250 1250 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1251 1251 force=force)
1252 1252 common, fetch, rheads = tmp
1253 1253 if not fetch:
1254 1254 self.ui.status(_("no changes found\n"))
1255 1255 return 0
1256 1256
1257 1257 if fetch == [nullid]:
1258 1258 self.ui.status(_("requesting all changes\n"))
1259 1259 elif heads is None and remote.capable('changegroupsubset'):
1260 1260 # issue1320, avoid a race if remote changed after discovery
1261 1261 heads = rheads
1262 1262
1263 1263 if heads is None:
1264 1264 cg = remote.changegroup(fetch, 'pull')
1265 1265 else:
1266 1266 if not remote.capable('changegroupsubset'):
1267 1267 raise util.Abort(_("partial pull cannot be done because "
1268 1268 "other repository doesn't support "
1269 1269 "changegroupsubset."))
1270 1270 cg = remote.changegroupsubset(fetch, heads, 'pull')
1271 1271 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1272 1272 finally:
1273 1273 lock.release()
1274 1274
1275 1275 def push(self, remote, force=False, revs=None, newbranch=False):
1276 1276 '''Push outgoing changesets (limited by revs) from the current
1277 1277 repository to remote. Return an integer:
1278 1278 - 0 means HTTP error *or* nothing to push
1279 1279 - 1 means we pushed and remote head count is unchanged *or*
1280 1280 we have outgoing changesets but refused to push
1281 1281 - other values as described by addchangegroup()
1282 1282 '''
1283 1283 # there are two ways to push to remote repo:
1284 1284 #
1285 1285 # addchangegroup assumes local user can lock remote
1286 1286 # repo (local filesystem, old ssh servers).
1287 1287 #
1288 1288 # unbundle assumes local user cannot lock remote repo (new ssh
1289 1289 # servers, http servers).
1290 1290
1291 1291 lock = None
1292 1292 unbundle = remote.capable('unbundle')
1293 1293 if not unbundle:
1294 1294 lock = remote.lock()
1295 1295 try:
1296 1296 ret = discovery.prepush(self, remote, force, revs, newbranch)
1297 1297 if ret[0] is None:
1298 1298 # and here we return 0 for "nothing to push" or 1 for
1299 1299 # "something to push but I refuse"
1300 1300 return ret[1]
1301 1301
1302 1302 cg, remote_heads = ret
1303 1303 if unbundle:
1304 1304 # local repo finds heads on server, finds out what revs it must
1305 1305 # push. once revs transferred, if server finds it has
1306 1306 # different heads (someone else won commit/push race), server
1307 1307 # aborts.
1308 1308 if force:
1309 1309 remote_heads = ['force']
1310 1310 # ssh: return remote's addchangegroup()
1311 1311 # http: return remote's addchangegroup() or 0 for error
1312 1312 return remote.unbundle(cg, remote_heads, 'push')
1313 1313 else:
1314 1314 # we return an integer indicating remote head count change
1315 1315 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1316 1316 finally:
1317 1317 if lock is not None:
1318 1318 lock.release()
1319 1319
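# Interpreting push() return values, per the docstring above:
#
#   ret = repo.push(remote)
#   if ret == 0:
#       pass    # HTTP error, or nothing to push
#   elif ret == 1:
#       pass    # pushed with head count unchanged, or push refused
#   else:
#       pass    # head-count summary from addchangegroup()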
1320 1320 def changegroupinfo(self, nodes, source):
1321 1321 if self.ui.verbose or source == 'bundle':
1322 1322 self.ui.status(_("%d changesets found\n") % len(nodes))
1323 1323 if self.ui.debugflag:
1324 1324 self.ui.debug("list of changesets:\n")
1325 1325 for node in nodes:
1326 1326 self.ui.debug("%s\n" % hex(node))
1327 1327
1328 1328 def changegroupsubset(self, bases, heads, source, extranodes=None):
1329 1329 """Compute a changegroup consisting of all the nodes that are
1330 1330 descendants of any of the bases and ancestors of any of the heads.
1331 1331 Return a chunkbuffer object whose read() method will return
1332 1332 successive changegroup chunks.
1333 1333
1334 1334 It is fairly complex as determining which filenodes and which
1335 1335 manifest nodes need to be included for the changeset to be complete
1336 1336 is non-trivial.
1337 1337
1338 1338 Another wrinkle is doing the reverse, figuring out which changeset in
1339 1339 the changegroup a particular filenode or manifestnode belongs to.
1340 1340
1341 1341 The caller can specify some nodes that must be included in the
1342 1342 changegroup using the extranodes argument. It should be a dict
1343 1343 where the keys are the filenames (or 1 for the manifest), and the
1344 1344 values are lists of (node, linknode) tuples, where node is a wanted
1345 1345 node and linknode is the changelog node that should be transmitted as
1346 1346 the linkrev.
1347 1347 """
1348 1348
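# Shape of the extranodes argument described above, with hypothetical
# entries; the key 1 stands for the manifest:
#
#   extranodes = {
#       'path/to/file': [(filenode, linknode)],
#       1:              [(manifestnode, linknode)],
#   }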
1349 1349 # Set up some initial variables
1350 1350 # Make it easy to refer to self.changelog
1351 1351 cl = self.changelog
1352 1352 # Compute the list of changesets in this changegroup.
1353 1353 # Some bases may turn out to be superfluous, and some heads may be
1354 1354 # too. nodesbetween will return the minimal set of bases and heads
1355 1355 # necessary to re-create the changegroup.
1356 1356 if not bases:
1357 1357 bases = [nullid]
1358 1358 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1359 1359
1360 1360 if extranodes is None:
1361 1361 # can we go through the fast path ?
1362 1362 heads.sort()
1363 1363 allheads = self.heads()
1364 1364 allheads.sort()
1365 1365 if heads == allheads:
1366 1366 return self._changegroup(msng_cl_lst, source)
1367 1367
1368 1368 # slow path
1369 1369 self.hook('preoutgoing', throw=True, source=source)
1370 1370
1371 1371 self.changegroupinfo(msng_cl_lst, source)
1372 1372
1373 1373 # We assume that all ancestors of bases are known
1374 1374 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1375 1375
1376 1376 # Make it easy to refer to self.manifest
1377 1377 mnfst = self.manifest
1378 1378 # We don't know which manifests are missing yet
1379 1379 msng_mnfst_set = {}
1380 1380 # Nor do we know which filenodes are missing.
1381 1381 msng_filenode_set = {}
1382 1382
1383 1383 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1384 1384 junk = None
1385 1385
1386 1386 # A changeset always belongs to itself, so the changenode lookup
1387 1387 # function for a changenode is identity.
1388 1388 def identity(x):
1389 1389 return x
1390 1390
1391 1391 # A function-generating function that sets up the initial environment
1392 1392 # for the inner function.
1393 1393 def filenode_collector(changedfiles):
1394 1394 # This gathers information from each manifestnode included in the
1395 1395 # changegroup about which filenodes the manifest node references
1396 1396 # so we can include those in the changegroup too.
1397 1397 #
1398 1398 # It also remembers which changenode each filenode belongs to. It
1399 1399 # does this by assuming a filenode belongs to the changenode
1400 1400 # that the first manifest referencing it belongs to.
1401 1401 def collect_msng_filenodes(mnfstnode):
1402 1402 r = mnfst.rev(mnfstnode)
1403 1403 if r - 1 in mnfst.parentrevs(r):
1404 1404 # If the previous rev is one of the parents,
1405 1405 # we only need to see a diff.
1406 1406 deltamf = mnfst.readdelta(mnfstnode)
1407 1407 # For each line in the delta
1408 1408 for f, fnode in deltamf.iteritems():
1409 1409 # And if the file is in the list of files we care
1410 1410 # about.
1411 1411 if f in changedfiles:
1412 1412 # Get the changenode this manifest belongs to
1413 1413 clnode = msng_mnfst_set[mnfstnode]
1414 1414 # Create the set of filenodes for the file if
1415 1415 # there isn't one already.
1416 1416 ndset = msng_filenode_set.setdefault(f, {})
1417 1417 # And set the filenode's changelog node to the
1418 1418 # manifest's if it hasn't been set already.
1419 1419 ndset.setdefault(fnode, clnode)
1420 1420 else:
1421 1421 # Otherwise we need a full manifest.
1422 1422 m = mnfst.read(mnfstnode)
1423 1423 # For every file we care about.
1424 1424 for f in changedfiles:
1425 1425 fnode = m.get(f, None)
1426 1426 # If it's in the manifest
1427 1427 if fnode is not None:
1428 1428 # See comments above.
1429 1429 clnode = msng_mnfst_set[mnfstnode]
1430 1430 ndset = msng_filenode_set.setdefault(f, {})
1431 1431 ndset.setdefault(fnode, clnode)
1432 1432 return collect_msng_filenodes
1433 1433
1434 1434 # If we determine that a particular file or manifest node must be a
1435 1435 # node that the recipient of the changegroup will already have, we can
1436 1436 # also assume the recipient will have all the parents. This function
1437 1437 # prunes them from the set of missing nodes.
1438 1438 def prune(revlog, missingnodes):
1439 1439 hasset = set()
1440 1440 # If a 'missing' filenode thinks it belongs to a changenode we
1441 1441 # assume the recipient must have, then the recipient must have
1442 1442 # that filenode.
1443 1443 for n in missingnodes:
1444 1444 clrev = revlog.linkrev(revlog.rev(n))
1445 1445 if clrev in commonrevs:
1446 1446 hasset.add(n)
1447 1447 for n in hasset:
1448 1448 missingnodes.pop(n, None)
1449 1449 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1450 1450 missingnodes.pop(revlog.node(r), None)
1451 1451
1452 1452 # Add the nodes that were explicitly requested.
1453 1453 def add_extra_nodes(name, nodes):
1454 1454 if not extranodes or name not in extranodes:
1455 1455 return
1456 1456
1457 1457 for node, linknode in extranodes[name]:
1458 1458 if node not in nodes:
1459 1459 nodes[node] = linknode
1460 1460
1461 1461 # Now that we have all these utility functions to help out and
1462 1462 # logically divide up the task, generate the group.
1463 1463 def gengroup():
1464 1464 # The set of changed files starts empty.
1465 1465 changedfiles = set()
1466 1466 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1467 1467
1468 1468 # Create a changenode group generator that will call our functions
1469 1469 # back to lookup the owning changenode and collect information.
1470 1470 group = cl.group(msng_cl_lst, identity, collect)
1471 1471 for cnt, chnk in enumerate(group):
1472 1472 yield chnk
1473 1473 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1474 1474 self.ui.progress(_('bundling changes'), None)
1475 1475
1476 1476 prune(mnfst, msng_mnfst_set)
1477 1477 add_extra_nodes(1, msng_mnfst_set)
1478 1478 msng_mnfst_lst = msng_mnfst_set.keys()
1479 1479 # Sort the manifestnodes by revision number.
1480 1480 msng_mnfst_lst.sort(key=mnfst.rev)
1481 1481 # Create a generator for the manifestnodes that calls our lookup
1482 1482 # and data collection functions back.
1483 1483 group = mnfst.group(msng_mnfst_lst,
1484 1484 lambda mnode: msng_mnfst_set[mnode],
1485 1485 filenode_collector(changedfiles))
1486 1486 for cnt, chnk in enumerate(group):
1487 1487 yield chnk
1488 1488 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1489 1489 self.ui.progress(_('bundling manifests'), None)
1490 1490
1491 1491 # These are no longer needed, dereference and toss the memory for
1492 1492 # them.
1493 1493 msng_mnfst_lst = None
1494 1494 msng_mnfst_set.clear()
1495 1495
1496 1496 if extranodes:
1497 1497 for fname in extranodes:
1498 1498 if isinstance(fname, int):
1499 1499 continue
1500 1500 msng_filenode_set.setdefault(fname, {})
1501 1501 changedfiles.add(fname)
1502 1502 # Go through all our files in order sorted by name.
1503 1503 cnt = 0
1504 1504 for fname in sorted(changedfiles):
1505 1505 filerevlog = self.file(fname)
1506 1506 if not len(filerevlog):
1507 1507 raise util.Abort(_("empty or missing revlog for %s") % fname)
1508 1508 # Toss out the filenodes that the recipient isn't really
1509 1509 # missing.
1510 1510 missingfnodes = msng_filenode_set.pop(fname, {})
1511 1511 prune(filerevlog, missingfnodes)
1512 1512 add_extra_nodes(fname, missingfnodes)
1513 1513 # If any filenodes are left, generate the group for them,
1514 1514 # otherwise don't bother.
1515 1515 if missingfnodes:
1516 1516 yield changegroup.chunkheader(len(fname))
1517 1517 yield fname
1518 1518 # Sort the filenodes by their revision # (topological order)
1519 1519 nodeiter = list(missingfnodes)
1520 1520 nodeiter.sort(key=filerevlog.rev)
1521 1521 # Create a group generator and only pass in a changenode
1522 1522 # lookup function as we need to collect no information
1523 1523 # from filenodes.
1524 1524 group = filerevlog.group(nodeiter,
1525 1525 lambda fnode: missingfnodes[fnode])
1526 1526 for chnk in group:
1527 1527 self.ui.progress(
1528 1528 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1529 1529 cnt += 1
1530 1530 yield chnk
1531 1531 # Signal that no more groups are left.
1532 1532 yield changegroup.closechunk()
1533 1533 self.ui.progress(_('bundling files'), None)
1534 1534
1535 1535 if msng_cl_lst:
1536 1536 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1537 1537
        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
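            # Emission order mirrors the changegroup wire format: changelog
            # chunks, then manifest chunks, then for each changed file a
            # chunk header + file name + its chunks, and finally a close
            # chunk marking the end of the stream.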
            # construct the set of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
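        # For example, per the accounting at the end of this method: a pull
        # that grows the repo from 1 head to 3 returns 3 (1 + 2 added heads),
        # while one that merges 3 heads down to 1 returns -3.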
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
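            # Progress callback handed to the chunk iterators below; each
            # call advances the current step's counter by one chunk.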
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            chunkiter = changegroup.chunkiter(source, progress=pr)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            chunkiter = changegroup.chunkiter(source, progress=pr)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
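        # The stream format, as consumed below: one line with an integer
        # status code, one line with "<total_files> <total_bytes>", then for
        # each file a "<name>\0<size>" line followed by <size> bytes of raw
        # store data.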
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
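        # the store was written to directly, bypassing the normal caches;
        # drop them so subsequent reads see the new data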
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

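    # pushkey is a generic per-namespace key/value exchange between repos;
    # both operations simply delegate to the pushkey module.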
    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
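# The returned callback performs the queued (src, dest) renames in order.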
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True