convert: differentiate between IOError and OSError on commitctx()...
Giorgos Keramidas
r10428:e553a425 stable
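The hunk below changes the exception handling in commitctx(): the old code caught OSError and IOError together and, unless an error commit was in progress, silently treated any failing file as removed. After this change an OSError always warns and re-raises, while an IOError is forgiven only when its errno is a plain ENOENT (the file genuinely went missing) and error is not set. A minimal standalone sketch of the resulting control flow, written in the same Python 2 idiom as the file; commit_one_file and filecommitfn are illustrative stand-ins, not Mercurial API:

import errno

def commit_one_file(filecommitfn, f, error=False):
    # filecommitfn stands in for the real per-file commit step
    try:
        return filecommitfn(f)
    except OSError:
        # an OS-level failure is never safe to read as "file removed"
        print "trouble committing %s!" % f
        raise
    except IOError, inst:
        errcode = getattr(inst, 'errno', errno.ENOENT)
        if error or errcode and errcode != errno.ENOENT:
            print "trouble committing %s!" % f
            raise
        # plain ENOENT outside an error commit: record the file as removed
        return None

Note that "and" binds tighter than "or", so the test reads as error or (errcode and errcode != errno.ENOENT): the warn-and-raise path is taken for an error commit, or for any IOError whose errno is set and is not ENOENT.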
@@ -1,2154 +1,2158 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 import tags as tags_
17 17 from lock import release
18 18 import weakref, stat, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 23 supported = set('revlogv1 store fncache shared'.split())
24 24
25 25 def __init__(self, baseui, path=None, create=0):
26 26 repo.repository.__init__(self)
27 27 self.root = os.path.realpath(path)
28 28 self.path = os.path.join(self.root, ".hg")
29 29 self.origroot = path
30 30 self.opener = util.opener(self.path)
31 31 self.wopener = util.opener(self.root)
32 32 self.baseui = baseui
33 33 self.ui = baseui.copy()
34 34
35 35 try:
36 36 self.ui.readconfig(self.join("hgrc"), self.root)
37 37 extensions.loadall(self.ui)
38 38 except IOError:
39 39 pass
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 requirements = ["revlogv1"]
47 47 if self.ui.configbool('format', 'usestore', True):
48 48 os.mkdir(os.path.join(self.path, "store"))
49 49 requirements.append("store")
50 50 if self.ui.configbool('format', 'usefncache', True):
51 51 requirements.append("fncache")
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 reqfile = self.opener("requires", "w")
58 58 for r in requirements:
59 59 reqfile.write("%s\n" % r)
60 60 reqfile.close()
61 61 else:
62 62 raise error.RepoError(_("repository %s not found") % path)
63 63 elif create:
64 64 raise error.RepoError(_("repository %s already exists") % path)
65 65 else:
66 66 # find requirements
67 67 requirements = set()
68 68 try:
69 69 requirements = set(self.opener("requires").read().splitlines())
70 70 except IOError, inst:
71 71 if inst.errno != errno.ENOENT:
72 72 raise
73 73 for r in requirements - self.supported:
74 74 raise error.RepoError(_("requirement '%s' not supported") % r)
75 75
76 76 self.sharedpath = self.path
77 77 try:
78 78 s = os.path.realpath(self.opener("sharedpath").read())
79 79 if not os.path.exists(s):
80 80 raise error.RepoError(
81 81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 82 self.sharedpath = s
83 83 except IOError, inst:
84 84 if inst.errno != errno.ENOENT:
85 85 raise
86 86
87 87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 88 self.spath = self.store.path
89 89 self.sopener = self.store.opener
90 90 self.sjoin = self.store.join
91 91 self.opener.createmode = self.store.createmode
92 92
93 93 # These two define the set of tags for this repository. _tags
94 94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 95 # 'local'. (Global tags are defined by .hgtags across all
96 96 # heads, and local tags are defined in .hg/localtags.) They
97 97 # constitute the in-memory cache of tags.
98 98 self._tags = None
99 99 self._tagtypes = None
100 100
101 101 self._branchcache = None # in UTF-8
102 102 self._branchcachetip = None
103 103 self.nodetagscache = None
104 104 self.filterpats = {}
105 105 self._datafilters = {}
106 106 self._transref = self._lockref = self._wlockref = None
107 107
108 108 @propertycache
109 109 def changelog(self):
110 110 c = changelog.changelog(self.sopener)
111 111 if 'HG_PENDING' in os.environ:
112 112 p = os.environ['HG_PENDING']
113 113 if p.startswith(self.root):
114 114 c.readpending('00changelog.i.a')
115 115 self.sopener.defversion = c.version
116 116 return c
117 117
118 118 @propertycache
119 119 def manifest(self):
120 120 return manifest.manifest(self.sopener)
121 121
122 122 @propertycache
123 123 def dirstate(self):
124 124 return dirstate.dirstate(self.opener, self.ui, self.root)
125 125
126 126 def __getitem__(self, changeid):
127 127 if changeid is None:
128 128 return context.workingctx(self)
129 129 return context.changectx(self, changeid)
130 130
131 131 def __nonzero__(self):
132 132 return True
133 133
134 134 def __len__(self):
135 135 return len(self.changelog)
136 136
137 137 def __iter__(self):
138 138 for i in xrange(len(self)):
139 139 yield i
140 140
141 141 def url(self):
142 142 return 'file:' + self.root
143 143
144 144 def hook(self, name, throw=False, **args):
145 145 return hook.hook(self.ui, self, name, throw, **args)
146 146
147 147 tag_disallowed = ':\r\n'
148 148
149 149 def _tag(self, names, node, message, local, user, date, extra={}):
150 150 if isinstance(names, str):
151 151 allchars = names
152 152 names = (names,)
153 153 else:
154 154 allchars = ''.join(names)
155 155 for c in self.tag_disallowed:
156 156 if c in allchars:
157 157 raise util.Abort(_('%r cannot be used in a tag name') % c)
158 158
159 159 for name in names:
160 160 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 161 local=local)
162 162
163 163 def writetags(fp, names, munge, prevtags):
164 164 fp.seek(0, 2)
165 165 if prevtags and prevtags[-1] != '\n':
166 166 fp.write('\n')
167 167 for name in names:
168 168 m = munge and munge(name) or name
169 169 if self._tagtypes and name in self._tagtypes:
170 170 old = self._tags.get(name, nullid)
171 171 fp.write('%s %s\n' % (hex(old), m))
172 172 fp.write('%s %s\n' % (hex(node), m))
173 173 fp.close()
174 174
175 175 prevtags = ''
176 176 if local:
177 177 try:
178 178 fp = self.opener('localtags', 'r+')
179 179 except IOError:
180 180 fp = self.opener('localtags', 'a')
181 181 else:
182 182 prevtags = fp.read()
183 183
184 184 # local tags are stored in the current charset
185 185 writetags(fp, names, None, prevtags)
186 186 for name in names:
187 187 self.hook('tag', node=hex(node), tag=name, local=local)
188 188 return
189 189
190 190 try:
191 191 fp = self.wfile('.hgtags', 'rb+')
192 192 except IOError:
193 193 fp = self.wfile('.hgtags', 'ab')
194 194 else:
195 195 prevtags = fp.read()
196 196
197 197 # committed tags are stored in UTF-8
198 198 writetags(fp, names, encoding.fromlocal, prevtags)
199 199
200 200 if '.hgtags' not in self.dirstate:
201 201 self.add(['.hgtags'])
202 202
203 203 m = match_.exact(self.root, '', ['.hgtags'])
204 204 tagnode = self.commit(message, user, date, extra=extra, match=m)
205 205
206 206 for name in names:
207 207 self.hook('tag', node=hex(node), tag=name, local=local)
208 208
209 209 return tagnode
210 210
211 211 def tag(self, names, node, message, local, user, date):
212 212 '''tag a revision with one or more symbolic names.
213 213
214 214 names is a list of strings or, when adding a single tag, names may be a
215 215 string.
216 216
217 217 if local is True, the tags are stored in a per-repository file.
218 218 otherwise, they are stored in the .hgtags file, and a new
219 219 changeset is committed with the change.
220 220
221 221 keyword arguments:
222 222
223 223 local: whether to store tags in non-version-controlled file
224 224 (default False)
225 225
226 226 message: commit message to use if committing
227 227
228 228 user: name of user to use if committing
229 229
230 230 date: date tuple to use if committing'''
231 231
232 232 for x in self.status()[:5]:
233 233 if '.hgtags' in x:
234 234 raise util.Abort(_('working copy of .hgtags is changed '
235 235 '(please commit .hgtags manually)'))
236 236
237 237 self.tags() # instantiate the cache
238 238 self._tag(names, node, message, local, user, date)
239 239
240 240 def tags(self):
241 241 '''return a mapping of tag to node'''
242 242 if self._tags is None:
243 243 (self._tags, self._tagtypes) = self._findtags()
244 244
245 245 return self._tags
246 246
247 247 def _findtags(self):
248 248 '''Do the hard work of finding tags. Return a pair of dicts
249 249 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 250 maps tag name to a string like \'global\' or \'local\'.
251 251 Subclasses or extensions are free to add their own tags, but
252 252 should be aware that the returned dicts will be retained for the
253 253 duration of the localrepo object.'''
254 254
255 255 # XXX what tagtype should subclasses/extensions use? Currently
256 256 # mq and bookmarks add tags, but do not set the tagtype at all.
257 257 # Should each extension invent its own tag type? Should there
258 258 # be one tagtype for all such "virtual" tags? Or is the status
259 259 # quo fine?
260 260
261 261 alltags = {} # map tag name to (node, hist)
262 262 tagtypes = {}
263 263
264 264 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
265 265 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
266 266
267 267 # Build the return dicts. Have to re-encode tag names because
268 268 # the tags module always uses UTF-8 (in order not to lose info
269 269 # writing to the cache), but the rest of Mercurial wants them in
270 270 # local encoding.
271 271 tags = {}
272 272 for (name, (node, hist)) in alltags.iteritems():
273 273 if node != nullid:
274 274 tags[encoding.tolocal(name)] = node
275 275 tags['tip'] = self.changelog.tip()
276 276 tagtypes = dict([(encoding.tolocal(name), value)
277 277 for (name, value) in tagtypes.iteritems()])
278 278 return (tags, tagtypes)
279 279
280 280 def tagtype(self, tagname):
281 281 '''
282 282 return the type of the given tag. result can be:
283 283
284 284 'local' : a local tag
285 285 'global' : a global tag
286 286 None : tag does not exist
287 287 '''
288 288
289 289 self.tags()
290 290
291 291 return self._tagtypes.get(tagname)
292 292
293 293 def tagslist(self):
294 294 '''return a list of tags ordered by revision'''
295 295 l = []
296 296 for t, n in self.tags().iteritems():
297 297 try:
298 298 r = self.changelog.rev(n)
299 299 except:
300 300 r = -2 # sort to the beginning of the list if unknown
301 301 l.append((r, t, n))
302 302 return [(t, n) for r, t, n in sorted(l)]
303 303
304 304 def nodetags(self, node):
305 305 '''return the tags associated with a node'''
306 306 if not self.nodetagscache:
307 307 self.nodetagscache = {}
308 308 for t, n in self.tags().iteritems():
309 309 self.nodetagscache.setdefault(n, []).append(t)
310 310 return self.nodetagscache.get(node, [])
311 311
312 312 def _branchtags(self, partial, lrev):
313 313 # TODO: rename this function?
314 314 tiprev = len(self) - 1
315 315 if lrev != tiprev:
316 316 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318 318
319 319 return partial
320 320
321 321 def branchmap(self):
322 322 tip = self.changelog.tip()
323 323 if self._branchcache is not None and self._branchcachetip == tip:
324 324 return self._branchcache
325 325
326 326 oldtip = self._branchcachetip
327 327 self._branchcachetip = tip
328 328 if oldtip is None or oldtip not in self.changelog.nodemap:
329 329 partial, last, lrev = self._readbranchcache()
330 330 else:
331 331 lrev = self.changelog.rev(oldtip)
332 332 partial = self._branchcache
333 333
334 334 self._branchtags(partial, lrev)
335 335 # this private cache holds all heads (not just tips)
336 336 self._branchcache = partial
337 337
338 338 return self._branchcache
339 339
340 340 def branchtags(self):
341 341 '''return a dict where branch names map to the tipmost head of
342 342 the branch; open heads come before closed'''
343 343 bt = {}
344 344 for bn, heads in self.branchmap().iteritems():
345 345 head = None
346 346 for i in range(len(heads)-1, -1, -1):
347 347 h = heads[i]
348 348 if 'close' not in self.changelog.read(h)[5]:
349 349 head = h
350 350 break
351 351 # no open heads were found
352 352 if head is None:
353 353 head = heads[-1]
354 354 bt[bn] = head
355 355 return bt
356 356
357 357
358 358 def _readbranchcache(self):
359 359 partial = {}
360 360 try:
361 361 f = self.opener("branchheads.cache")
362 362 lines = f.read().split('\n')
363 363 f.close()
364 364 except (IOError, OSError):
365 365 return {}, nullid, nullrev
366 366
367 367 try:
368 368 last, lrev = lines.pop(0).split(" ", 1)
369 369 last, lrev = bin(last), int(lrev)
370 370 if lrev >= len(self) or self[lrev].node() != last:
371 371 # invalidate the cache
372 372 raise ValueError('invalidating branch cache (tip differs)')
373 373 for l in lines:
374 374 if not l: continue
375 375 node, label = l.split(" ", 1)
376 376 partial.setdefault(label.strip(), []).append(bin(node))
377 377 except KeyboardInterrupt:
378 378 raise
379 379 except Exception, inst:
380 380 if self.ui.debugflag:
381 381 self.ui.warn(str(inst), '\n')
382 382 partial, last, lrev = {}, nullid, nullrev
383 383 return partial, last, lrev
384 384
385 385 def _writebranchcache(self, branches, tip, tiprev):
386 386 try:
387 387 f = self.opener("branchheads.cache", "w", atomictemp=True)
388 388 f.write("%s %s\n" % (hex(tip), tiprev))
389 389 for label, nodes in branches.iteritems():
390 390 for node in nodes:
391 391 f.write("%s %s\n" % (hex(node), label))
392 392 f.rename()
393 393 except (IOError, OSError):
394 394 pass
395 395
396 396 def _updatebranchcache(self, partial, start, end):
397 397 # collect new branch entries
398 398 newbranches = {}
399 399 for r in xrange(start, end):
400 400 c = self[r]
401 401 newbranches.setdefault(c.branch(), []).append(c.node())
402 402 # if older branchheads are reachable from new ones, they aren't
403 403 # really branchheads. Note checking parents is insufficient:
404 404 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
405 405 for branch, newnodes in newbranches.iteritems():
406 406 bheads = partial.setdefault(branch, [])
407 407 bheads.extend(newnodes)
408 408 if len(bheads) < 2:
409 409 continue
410 410 newbheads = []
411 411 # starting from tip means fewer passes over reachable
412 412 while newnodes:
413 413 latest = newnodes.pop()
414 414 if latest not in bheads:
415 415 continue
416 416 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
417 417 reachable = self.changelog.reachable(latest, minbhrev)
418 418 bheads = [b for b in bheads if b not in reachable]
419 419 newbheads.insert(0, latest)
420 420 bheads.extend(newbheads)
421 421 partial[branch] = bheads
422 422
423 423 def lookup(self, key):
424 424 if isinstance(key, int):
425 425 return self.changelog.node(key)
426 426 elif key == '.':
427 427 return self.dirstate.parents()[0]
428 428 elif key == 'null':
429 429 return nullid
430 430 elif key == 'tip':
431 431 return self.changelog.tip()
432 432 n = self.changelog._match(key)
433 433 if n:
434 434 return n
435 435 if key in self.tags():
436 436 return self.tags()[key]
437 437 if key in self.branchtags():
438 438 return self.branchtags()[key]
439 439 n = self.changelog._partialmatch(key)
440 440 if n:
441 441 return n
442 442
443 443 # can't find key, check if it might have come from damaged dirstate
444 444 if key in self.dirstate.parents():
445 445 raise error.Abort(_("working directory has unknown parent '%s'!")
446 446 % short(key))
447 447 try:
448 448 if len(key) == 20:
449 449 key = hex(key)
450 450 except:
451 451 pass
452 452 raise error.RepoLookupError(_("unknown revision '%s'") % key)
453 453
454 454 def local(self):
455 455 return True
456 456
457 457 def join(self, f):
458 458 return os.path.join(self.path, f)
459 459
460 460 def wjoin(self, f):
461 461 return os.path.join(self.root, f)
462 462
463 463 def rjoin(self, f):
464 464 return os.path.join(self.root, util.pconvert(f))
465 465
466 466 def file(self, f):
467 467 if f[0] == '/':
468 468 f = f[1:]
469 469 return filelog.filelog(self.sopener, f)
470 470
471 471 def changectx(self, changeid):
472 472 return self[changeid]
473 473
474 474 def parents(self, changeid=None):
475 475 '''get list of changectxs for parents of changeid'''
476 476 return self[changeid].parents()
477 477
478 478 def filectx(self, path, changeid=None, fileid=None):
479 479 """changeid can be a changeset revision, node, or tag.
480 480 fileid can be a file revision or node."""
481 481 return context.filectx(self, path, changeid, fileid)
482 482
483 483 def getcwd(self):
484 484 return self.dirstate.getcwd()
485 485
486 486 def pathto(self, f, cwd=None):
487 487 return self.dirstate.pathto(f, cwd)
488 488
489 489 def wfile(self, f, mode='r'):
490 490 return self.wopener(f, mode)
491 491
492 492 def _link(self, f):
493 493 return os.path.islink(self.wjoin(f))
494 494
495 495 def _filter(self, filter, filename, data):
496 496 if filter not in self.filterpats:
497 497 l = []
498 498 for pat, cmd in self.ui.configitems(filter):
499 499 if cmd == '!':
500 500 continue
501 501 mf = match_.match(self.root, '', [pat])
502 502 fn = None
503 503 params = cmd
504 504 for name, filterfn in self._datafilters.iteritems():
505 505 if cmd.startswith(name):
506 506 fn = filterfn
507 507 params = cmd[len(name):].lstrip()
508 508 break
509 509 if not fn:
510 510 fn = lambda s, c, **kwargs: util.filter(s, c)
511 511 # Wrap old filters not supporting keyword arguments
512 512 if not inspect.getargspec(fn)[2]:
513 513 oldfn = fn
514 514 fn = lambda s, c, **kwargs: oldfn(s, c)
515 515 l.append((mf, fn, params))
516 516 self.filterpats[filter] = l
517 517
518 518 for mf, fn, cmd in self.filterpats[filter]:
519 519 if mf(filename):
520 520 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
521 521 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
522 522 break
523 523
524 524 return data
525 525
526 526 def adddatafilter(self, name, filter):
527 527 self._datafilters[name] = filter
528 528
529 529 def wread(self, filename):
530 530 if self._link(filename):
531 531 data = os.readlink(self.wjoin(filename))
532 532 else:
533 533 data = self.wopener(filename, 'r').read()
534 534 return self._filter("encode", filename, data)
535 535
536 536 def wwrite(self, filename, data, flags):
537 537 data = self._filter("decode", filename, data)
538 538 try:
539 539 os.unlink(self.wjoin(filename))
540 540 except OSError:
541 541 pass
542 542 if 'l' in flags:
543 543 self.wopener.symlink(data, filename)
544 544 else:
545 545 self.wopener(filename, 'w').write(data)
546 546 if 'x' in flags:
547 547 util.set_flags(self.wjoin(filename), False, True)
548 548
549 549 def wwritedata(self, filename, data):
550 550 return self._filter("decode", filename, data)
551 551
552 552 def transaction(self):
553 553 tr = self._transref and self._transref() or None
554 554 if tr and tr.running():
555 555 return tr.nest()
556 556
557 557 # abort here if the journal already exists
558 558 if os.path.exists(self.sjoin("journal")):
559 559 raise error.RepoError(_("abandoned transaction found - run hg recover"))
560 560
561 561 # save dirstate for rollback
562 562 try:
563 563 ds = self.opener("dirstate").read()
564 564 except IOError:
565 565 ds = ""
566 566 self.opener("journal.dirstate", "w").write(ds)
567 567 self.opener("journal.branch", "w").write(self.dirstate.branch())
568 568
569 569 renames = [(self.sjoin("journal"), self.sjoin("undo")),
570 570 (self.join("journal.dirstate"), self.join("undo.dirstate")),
571 571 (self.join("journal.branch"), self.join("undo.branch"))]
572 572 tr = transaction.transaction(self.ui.warn, self.sopener,
573 573 self.sjoin("journal"),
574 574 aftertrans(renames),
575 575 self.store.createmode)
576 576 self._transref = weakref.ref(tr)
577 577 return tr
578 578
579 579 def recover(self):
580 580 lock = self.lock()
581 581 try:
582 582 if os.path.exists(self.sjoin("journal")):
583 583 self.ui.status(_("rolling back interrupted transaction\n"))
584 584 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
585 585 self.invalidate()
586 586 return True
587 587 else:
588 588 self.ui.warn(_("no interrupted transaction available\n"))
589 589 return False
590 590 finally:
591 591 lock.release()
592 592
593 593 def rollback(self):
594 594 wlock = lock = None
595 595 try:
596 596 wlock = self.wlock()
597 597 lock = self.lock()
598 598 if os.path.exists(self.sjoin("undo")):
599 599 self.ui.status(_("rolling back last transaction\n"))
600 600 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
601 601 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
602 602 try:
603 603 branch = self.opener("undo.branch").read()
604 604 self.dirstate.setbranch(branch)
605 605 except IOError:
606 606 self.ui.warn(_("Named branch could not be reset, "
607 607 "current branch still is: %s\n")
608 608 % encoding.tolocal(self.dirstate.branch()))
609 609 self.invalidate()
610 610 self.dirstate.invalidate()
611 611 self.destroyed()
612 612 else:
613 613 self.ui.warn(_("no rollback information available\n"))
614 614 finally:
615 615 release(lock, wlock)
616 616
617 617 def invalidate(self):
618 618 for a in "changelog manifest".split():
619 619 if a in self.__dict__:
620 620 delattr(self, a)
621 621 self._tags = None
622 622 self._tagtypes = None
623 623 self.nodetagscache = None
624 624 self._branchcache = None # in UTF-8
625 625 self._branchcachetip = None
626 626
627 627 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
628 628 try:
629 629 l = lock.lock(lockname, 0, releasefn, desc=desc)
630 630 except error.LockHeld, inst:
631 631 if not wait:
632 632 raise
633 633 self.ui.warn(_("waiting for lock on %s held by %r\n") %
634 634 (desc, inst.locker))
635 635 # default to 600 seconds timeout
636 636 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
637 637 releasefn, desc=desc)
638 638 if acquirefn:
639 639 acquirefn()
640 640 return l
641 641
642 642 def lock(self, wait=True):
643 643 '''Lock the repository store (.hg/store) and return a weak reference
644 644 to the lock. Use this before modifying the store (e.g. committing or
645 645 stripping). If you are opening a transaction, get a lock as well.'''
646 646 l = self._lockref and self._lockref()
647 647 if l is not None and l.held:
648 648 l.lock()
649 649 return l
650 650
651 651 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
652 652 _('repository %s') % self.origroot)
653 653 self._lockref = weakref.ref(l)
654 654 return l
655 655
656 656 def wlock(self, wait=True):
657 657 '''Lock the non-store parts of the repository (everything under
658 658 .hg except .hg/store) and return a weak reference to the lock.
659 659 Use this before modifying files in .hg.'''
660 660 l = self._wlockref and self._wlockref()
661 661 if l is not None and l.held:
662 662 l.lock()
663 663 return l
664 664
665 665 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
666 666 self.dirstate.invalidate, _('working directory of %s') %
667 667 self.origroot)
668 668 self._wlockref = weakref.ref(l)
669 669 return l
670 670
671 671 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
672 672 """
673 673 commit an individual file as part of a larger transaction
674 674 """
675 675
676 676 fname = fctx.path()
677 677 text = fctx.data()
678 678 flog = self.file(fname)
679 679 fparent1 = manifest1.get(fname, nullid)
680 680 fparent2 = fparent2o = manifest2.get(fname, nullid)
681 681
682 682 meta = {}
683 683 copy = fctx.renamed()
684 684 if copy and copy[0] != fname:
685 685 # Mark the new revision of this file as a copy of another
686 686 # file. This copy data will effectively act as a parent
687 687 # of this new revision. If this is a merge, the first
688 688 # parent will be the nullid (meaning "look up the copy data")
689 689 # and the second one will be the other parent. For example:
690 690 #
691 691 # 0 --- 1 --- 3 rev1 changes file foo
692 692 # \ / rev2 renames foo to bar and changes it
693 693 # \- 2 -/ rev3 should have bar with all changes and
694 694 # should record that bar descends from
695 695 # bar in rev2 and foo in rev1
696 696 #
697 697 # this allows this merge to succeed:
698 698 #
699 699 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
700 700 # \ / merging rev3 and rev4 should use bar@rev2
701 701 # \- 2 --- 4 as the merge base
702 702 #
703 703
704 704 cfname = copy[0]
705 705 crev = manifest1.get(cfname)
706 706 newfparent = fparent2
707 707
708 708 if manifest2: # branch merge
709 709 if fparent2 == nullid or crev is None: # copied on remote side
710 710 if cfname in manifest2:
711 711 crev = manifest2[cfname]
712 712 newfparent = fparent1
713 713
714 714 # find source in nearest ancestor if we've lost track
715 715 if not crev:
716 716 self.ui.debug(" %s: searching for copy revision for %s\n" %
717 717 (fname, cfname))
718 718 for ancestor in self['.'].ancestors():
719 719 if cfname in ancestor:
720 720 crev = ancestor[cfname].filenode()
721 721 break
722 722
723 723 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
724 724 meta["copy"] = cfname
725 725 meta["copyrev"] = hex(crev)
726 726 fparent1, fparent2 = nullid, newfparent
727 727 elif fparent2 != nullid:
728 728 # is one parent an ancestor of the other?
729 729 fparentancestor = flog.ancestor(fparent1, fparent2)
730 730 if fparentancestor == fparent1:
731 731 fparent1, fparent2 = fparent2, nullid
732 732 elif fparentancestor == fparent2:
733 733 fparent2 = nullid
734 734
735 735 # is the file changed?
736 736 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
737 737 changelist.append(fname)
738 738 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
739 739
740 740 # are just the flags changed during merge?
741 741 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
742 742 changelist.append(fname)
743 743
744 744 return fparent1
745 745
746 746 def commit(self, text="", user=None, date=None, match=None, force=False,
747 747 editor=False, extra={}):
748 748 """Add a new revision to current repository.
749 749
750 750 Revision information is gathered from the working directory,
751 751 match can be used to filter the committed files. If editor is
752 752 supplied, it is called to get a commit message.
753 753 """
754 754
755 755 def fail(f, msg):
756 756 raise util.Abort('%s: %s' % (f, msg))
757 757
758 758 if not match:
759 759 match = match_.always(self.root, '')
760 760
761 761 if not force:
762 762 vdirs = []
763 763 match.dir = vdirs.append
764 764 match.bad = fail
765 765
766 766 wlock = self.wlock()
767 767 try:
768 768 p1, p2 = self.dirstate.parents()
769 769 wctx = self[None]
770 770
771 771 if (not force and p2 != nullid and match and
772 772 (match.files() or match.anypats())):
773 773 raise util.Abort(_('cannot partially commit a merge '
774 774 '(do not specify files or patterns)'))
775 775
776 776 changes = self.status(match=match, clean=force)
777 777 if force:
778 778 changes[0].extend(changes[6]) # mq may commit unchanged files
779 779
780 780 # check subrepos
781 781 subs = []
782 782 for s in wctx.substate:
783 783 if match(s) and wctx.sub(s).dirty():
784 784 subs.append(s)
785 785 if subs and '.hgsubstate' not in changes[0]:
786 786 changes[0].insert(0, '.hgsubstate')
787 787
788 788 # make sure all explicit patterns are matched
789 789 if not force and match.files():
790 790 matched = set(changes[0] + changes[1] + changes[2])
791 791
792 792 for f in match.files():
793 793 if f == '.' or f in matched or f in wctx.substate:
794 794 continue
795 795 if f in changes[3]: # missing
796 796 fail(f, _('file not found!'))
797 797 if f in vdirs: # visited directory
798 798 d = f + '/'
799 799 for mf in matched:
800 800 if mf.startswith(d):
801 801 break
802 802 else:
803 803 fail(f, _("no match under directory!"))
804 804 elif f not in self.dirstate:
805 805 fail(f, _("file not tracked!"))
806 806
807 807 if (not force and not extra.get("close") and p2 == nullid
808 808 and not (changes[0] or changes[1] or changes[2])
809 809 and self[None].branch() == self['.'].branch()):
810 810 return None
811 811
812 812 ms = merge_.mergestate(self)
813 813 for f in changes[0]:
814 814 if f in ms and ms[f] == 'u':
815 815 raise util.Abort(_("unresolved merge conflicts "
816 816 "(see hg resolve)"))
817 817
818 818 cctx = context.workingctx(self, (p1, p2), text, user, date,
819 819 extra, changes)
820 820 if editor:
821 821 cctx._text = editor(self, cctx, subs)
822 822
823 823 # commit subs
824 824 if subs:
825 825 state = wctx.substate.copy()
826 826 for s in subs:
827 827 self.ui.status(_('committing subrepository %s\n') % s)
828 828 sr = wctx.sub(s).commit(cctx._text, user, date)
829 829 state[s] = (state[s][0], sr)
830 830 subrepo.writestate(self, state)
831 831
832 832 ret = self.commitctx(cctx, True)
833 833
834 834 # update dirstate and mergestate
835 835 for f in changes[0] + changes[1]:
836 836 self.dirstate.normal(f)
837 837 for f in changes[2]:
838 838 self.dirstate.forget(f)
839 839 self.dirstate.setparents(ret)
840 840 ms.reset()
841 841
842 842 return ret
843 843
844 844 finally:
845 845 wlock.release()
846 846
847 847 def commitctx(self, ctx, error=False):
848 848 """Add a new revision to current repository.
849 849
850 850 Revision information is passed via the context argument.
851 851 """
852 852
853 853 tr = lock = None
854 854 removed = ctx.removed()
855 855 p1, p2 = ctx.p1(), ctx.p2()
856 856 m1 = p1.manifest().copy()
857 857 m2 = p2.manifest()
858 858 user = ctx.user()
859 859
860 860 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
861 861 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
862 862
863 863 lock = self.lock()
864 864 try:
865 865 tr = self.transaction()
866 866 trp = weakref.proxy(tr)
867 867
868 868 # check in files
869 869 new = {}
870 870 changed = []
871 871 linkrev = len(self)
872 872 for f in sorted(ctx.modified() + ctx.added()):
873 873 self.ui.note(f + "\n")
874 874 try:
875 875 fctx = ctx[f]
876 876 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
877 877 changed)
878 878 m1.set(f, fctx.flags())
879 except (OSError, IOError):
880 if error:
879 except OSError, inst:
880 self.ui.warn(_("trouble committing %s!\n") % f)
881 raise
882 except IOError, inst:
883 errcode = getattr(inst, 'errno', errno.ENOENT)
884 if error or errcode and errcode != errno.ENOENT:
881 885 self.ui.warn(_("trouble committing %s!\n") % f)
882 886 raise
883 887 else:
884 888 removed.append(f)
885 889
886 890 # update manifest
887 891 m1.update(new)
888 892 removed = [f for f in sorted(removed) if f in m1 or f in m2]
889 893 drop = [f for f in removed if f in m1]
890 894 for f in drop:
891 895 del m1[f]
892 896 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
893 897 p2.manifestnode(), (new, drop))
894 898
895 899 # update changelog
896 900 self.changelog.delayupdate()
897 901 n = self.changelog.add(mn, changed + removed, ctx.description(),
898 902 trp, p1.node(), p2.node(),
899 903 user, ctx.date(), ctx.extra().copy())
900 904 p = lambda: self.changelog.writepending() and self.root or ""
901 905 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
902 906 parent2=xp2, pending=p)
903 907 self.changelog.finalize(trp)
904 908 tr.close()
905 909
906 910 if self._branchcache:
907 911 self.branchtags()
908 912
909 913 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
910 914 return n
911 915 finally:
912 916 del tr
913 917 lock.release()
914 918
915 919 def destroyed(self):
916 920 '''Inform the repository that nodes have been destroyed.
917 921 Intended for use by strip and rollback, so there's a common
918 922 place for anything that has to be done after destroying history.'''
919 923 # XXX it might be nice if we could take the list of destroyed
920 924 # nodes, but I don't see an easy way for rollback() to do that
921 925
922 926 # Ensure the persistent tag cache is updated. Doing it now
923 927 # means that the tag cache only has to worry about destroyed
924 928 # heads immediately after a strip/rollback. That in turn
925 929 # guarantees that "cachetip == currenttip" (comparing both rev
926 930 # and node) always means no nodes have been added or destroyed.
927 931
928 932 # XXX this is suboptimal when qrefresh'ing: we strip the current
929 933 # head, refresh the tag cache, then immediately add a new head.
930 934 # But I think doing it this way is necessary for the "instant
931 935 # tag cache retrieval" case to work.
932 936 tags_.findglobaltags(self.ui, self, {}, {})
933 937
934 938 def walk(self, match, node=None):
935 939 '''
936 940 walk recursively through the directory tree or a given
937 941 changeset, finding all files matched by the match
938 942 function
939 943 '''
940 944 return self[node].walk(match)
941 945
942 946 def status(self, node1='.', node2=None, match=None,
943 947 ignored=False, clean=False, unknown=False):
944 948 """return status of files between two nodes or node and working directory
945 949
946 950 If node1 is None, use the first dirstate parent instead.
947 951 If node2 is None, compare node1 with working directory.
948 952 """
949 953
950 954 def mfmatches(ctx):
951 955 mf = ctx.manifest().copy()
952 956 for fn in mf.keys():
953 957 if not match(fn):
954 958 del mf[fn]
955 959 return mf
956 960
957 961 if isinstance(node1, context.changectx):
958 962 ctx1 = node1
959 963 else:
960 964 ctx1 = self[node1]
961 965 if isinstance(node2, context.changectx):
962 966 ctx2 = node2
963 967 else:
964 968 ctx2 = self[node2]
965 969
966 970 working = ctx2.rev() is None
967 971 parentworking = working and ctx1 == self['.']
968 972 match = match or match_.always(self.root, self.getcwd())
969 973 listignored, listclean, listunknown = ignored, clean, unknown
970 974
971 975 # load earliest manifest first for caching reasons
972 976 if not working and ctx2.rev() < ctx1.rev():
973 977 ctx2.manifest()
974 978
975 979 if not parentworking:
976 980 def bad(f, msg):
977 981 if f not in ctx1:
978 982 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
979 983 match.bad = bad
980 984
981 985 if working: # we need to scan the working dir
982 986 s = self.dirstate.status(match, listignored, listclean, listunknown)
983 987 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
984 988
985 989 # check for any possibly clean files
986 990 if parentworking and cmp:
987 991 fixup = []
988 992 # do a full compare of any files that might have changed
989 993 for f in sorted(cmp):
990 994 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
991 995 or ctx1[f].cmp(ctx2[f].data())):
992 996 modified.append(f)
993 997 else:
994 998 fixup.append(f)
995 999
996 1000 if listclean:
997 1001 clean += fixup
998 1002
999 1003 # update dirstate for files that are actually clean
1000 1004 if fixup:
1001 1005 try:
1002 1006 # updating the dirstate is optional
1003 1007 # so we don't wait on the lock
1004 1008 wlock = self.wlock(False)
1005 1009 try:
1006 1010 for f in fixup:
1007 1011 self.dirstate.normal(f)
1008 1012 finally:
1009 1013 wlock.release()
1010 1014 except error.LockError:
1011 1015 pass
1012 1016
1013 1017 if not parentworking:
1014 1018 mf1 = mfmatches(ctx1)
1015 1019 if working:
1016 1020 # we are comparing working dir against non-parent
1017 1021 # generate a pseudo-manifest for the working dir
1018 1022 mf2 = mfmatches(self['.'])
1019 1023 for f in cmp + modified + added:
1020 1024 mf2[f] = None
1021 1025 mf2.set(f, ctx2.flags(f))
1022 1026 for f in removed:
1023 1027 if f in mf2:
1024 1028 del mf2[f]
1025 1029 else:
1026 1030 # we are comparing two revisions
1027 1031 deleted, unknown, ignored = [], [], []
1028 1032 mf2 = mfmatches(ctx2)
1029 1033
1030 1034 modified, added, clean = [], [], []
1031 1035 for fn in mf2:
1032 1036 if fn in mf1:
1033 1037 if (mf1.flags(fn) != mf2.flags(fn) or
1034 1038 (mf1[fn] != mf2[fn] and
1035 1039 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1036 1040 modified.append(fn)
1037 1041 elif listclean:
1038 1042 clean.append(fn)
1039 1043 del mf1[fn]
1040 1044 else:
1041 1045 added.append(fn)
1042 1046 removed = mf1.keys()
1043 1047
1044 1048 r = modified, added, removed, deleted, unknown, ignored, clean
1045 1049 [l.sort() for l in r]
1046 1050 return r
1047 1051
1048 1052 def add(self, list):
1049 1053 wlock = self.wlock()
1050 1054 try:
1051 1055 rejected = []
1052 1056 for f in list:
1053 1057 p = self.wjoin(f)
1054 1058 try:
1055 1059 st = os.lstat(p)
1056 1060 except:
1057 1061 self.ui.warn(_("%s does not exist!\n") % f)
1058 1062 rejected.append(f)
1059 1063 continue
1060 1064 if st.st_size > 10000000:
1061 1065 self.ui.warn(_("%s: files over 10MB may cause memory and"
1062 1066 " performance problems\n"
1063 1067 "(use 'hg revert %s' to unadd the file)\n")
1064 1068 % (f, f))
1065 1069 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1066 1070 self.ui.warn(_("%s not added: only files and symlinks "
1067 1071 "supported currently\n") % f)
1068 1072 rejected.append(p)
1069 1073 elif self.dirstate[f] in 'amn':
1070 1074 self.ui.warn(_("%s already tracked!\n") % f)
1071 1075 elif self.dirstate[f] == 'r':
1072 1076 self.dirstate.normallookup(f)
1073 1077 else:
1074 1078 self.dirstate.add(f)
1075 1079 return rejected
1076 1080 finally:
1077 1081 wlock.release()
1078 1082
1079 1083 def forget(self, list):
1080 1084 wlock = self.wlock()
1081 1085 try:
1082 1086 for f in list:
1083 1087 if self.dirstate[f] != 'a':
1084 1088 self.ui.warn(_("%s not added!\n") % f)
1085 1089 else:
1086 1090 self.dirstate.forget(f)
1087 1091 finally:
1088 1092 wlock.release()
1089 1093
1090 1094 def remove(self, list, unlink=False):
1091 1095 if unlink:
1092 1096 for f in list:
1093 1097 try:
1094 1098 util.unlink(self.wjoin(f))
1095 1099 except OSError, inst:
1096 1100 if inst.errno != errno.ENOENT:
1097 1101 raise
1098 1102 wlock = self.wlock()
1099 1103 try:
1100 1104 for f in list:
1101 1105 if unlink and os.path.exists(self.wjoin(f)):
1102 1106 self.ui.warn(_("%s still exists!\n") % f)
1103 1107 elif self.dirstate[f] == 'a':
1104 1108 self.dirstate.forget(f)
1105 1109 elif f not in self.dirstate:
1106 1110 self.ui.warn(_("%s not tracked!\n") % f)
1107 1111 else:
1108 1112 self.dirstate.remove(f)
1109 1113 finally:
1110 1114 wlock.release()
1111 1115
1112 1116 def undelete(self, list):
1113 1117 manifests = [self.manifest.read(self.changelog.read(p)[0])
1114 1118 for p in self.dirstate.parents() if p != nullid]
1115 1119 wlock = self.wlock()
1116 1120 try:
1117 1121 for f in list:
1118 1122 if self.dirstate[f] != 'r':
1119 1123 self.ui.warn(_("%s not removed!\n") % f)
1120 1124 else:
1121 1125 m = f in manifests[0] and manifests[0] or manifests[1]
1122 1126 t = self.file(f).read(m[f])
1123 1127 self.wwrite(f, t, m.flags(f))
1124 1128 self.dirstate.normal(f)
1125 1129 finally:
1126 1130 wlock.release()
1127 1131
1128 1132 def copy(self, source, dest):
1129 1133 p = self.wjoin(dest)
1130 1134 if not (os.path.exists(p) or os.path.islink(p)):
1131 1135 self.ui.warn(_("%s does not exist!\n") % dest)
1132 1136 elif not (os.path.isfile(p) or os.path.islink(p)):
1133 1137 self.ui.warn(_("copy failed: %s is not a file or a "
1134 1138 "symbolic link\n") % dest)
1135 1139 else:
1136 1140 wlock = self.wlock()
1137 1141 try:
1138 1142 if self.dirstate[dest] in '?r':
1139 1143 self.dirstate.add(dest)
1140 1144 self.dirstate.copy(source, dest)
1141 1145 finally:
1142 1146 wlock.release()
1143 1147
1144 1148 def heads(self, start=None):
1145 1149 heads = self.changelog.heads(start)
1146 1150 # sort the output in rev descending order
1147 1151 heads = [(-self.changelog.rev(h), h) for h in heads]
1148 1152 return [n for (r, n) in sorted(heads)]
1149 1153
1150 1154 def branchheads(self, branch=None, start=None, closed=False):
1151 1155 '''return a (possibly filtered) list of heads for the given branch
1152 1156
1153 1157 Heads are returned in topological order, from newest to oldest.
1154 1158 If branch is None, use the dirstate branch.
1155 1159 If start is not None, return only heads reachable from start.
1156 1160 If closed is True, return heads that are marked as closed as well.
1157 1161 '''
1158 1162 if branch is None:
1159 1163 branch = self[None].branch()
1160 1164 branches = self.branchmap()
1161 1165 if branch not in branches:
1162 1166 return []
1163 1167 # the cache returns heads ordered lowest to highest
1164 1168 bheads = list(reversed(branches[branch]))
1165 1169 if start is not None:
1166 1170 # filter out the heads that cannot be reached from startrev
1167 1171 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1168 1172 bheads = [h for h in bheads if h in fbheads]
1169 1173 if not closed:
1170 1174 bheads = [h for h in bheads if
1171 1175 ('close' not in self.changelog.read(h)[5])]
1172 1176 return bheads
1173 1177
1174 1178 def branches(self, nodes):
1175 1179 if not nodes:
1176 1180 nodes = [self.changelog.tip()]
1177 1181 b = []
1178 1182 for n in nodes:
1179 1183 t = n
1180 1184 while 1:
1181 1185 p = self.changelog.parents(n)
1182 1186 if p[1] != nullid or p[0] == nullid:
1183 1187 b.append((t, n, p[0], p[1]))
1184 1188 break
1185 1189 n = p[0]
1186 1190 return b
1187 1191
1188 1192 def between(self, pairs):
1189 1193 r = []
1190 1194
1191 1195 for top, bottom in pairs:
1192 1196 n, l, i = top, [], 0
1193 1197 f = 1
1194 1198
1195 1199 while n != bottom and n != nullid:
1196 1200 p = self.changelog.parents(n)[0]
1197 1201 if i == f:
1198 1202 l.append(n)
1199 1203 f = f * 2
1200 1204 n = p
1201 1205 i += 1
1202 1206
1203 1207 r.append(l)
1204 1208
1205 1209 return r
1206 1210
1207 1211 def findincoming(self, remote, base=None, heads=None, force=False):
1208 1212 """Return list of roots of the subsets of missing nodes from remote
1209 1213
1210 1214 If base dict is specified, assume that these nodes and their parents
1211 1215 exist on the remote side and that no child of a node of base exists
1212 1216 in both remote and self.
1213 1217 Furthermore, base will be updated to include the nodes that exist
1214 1218 in both self and remote but have no children that exist in both.
1215 1219 If a list of heads is specified, return only nodes which are heads
1216 1220 or ancestors of these heads.
1217 1221
1218 1222 All the ancestors of base are in self and in remote.
1219 1223 All the descendants of the list returned are missing in self.
1220 1224 (and so we know that the rest of the nodes are missing in remote, see
1221 1225 outgoing)
1222 1226 """
1223 1227 return self.findcommonincoming(remote, base, heads, force)[1]
1224 1228
1225 1229 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1226 1230 """Return a tuple (common, missing roots, heads) used to identify
1227 1231 missing nodes from remote.
1228 1232
1229 1233 If base dict is specified, assume that these nodes and their parents
1230 1234 exist on the remote side and that no child of a node of base exists
1231 1235 in both remote and self.
1232 1236 Furthermore, base will be updated to include the nodes that exist
1233 1237 in both self and remote but have no children that exist in both.
1234 1238 If a list of heads is specified, return only nodes which are heads
1235 1239 or ancestors of these heads.
1236 1240
1237 1241 All the ancestors of base are in self and in remote.
1238 1242 """
1239 1243 m = self.changelog.nodemap
1240 1244 search = []
1241 1245 fetch = set()
1242 1246 seen = set()
1243 1247 seenbranch = set()
1244 1248 if base is None:
1245 1249 base = {}
1246 1250
1247 1251 if not heads:
1248 1252 heads = remote.heads()
1249 1253
1250 1254 if self.changelog.tip() == nullid:
1251 1255 base[nullid] = 1
1252 1256 if heads != [nullid]:
1253 1257 return [nullid], [nullid], list(heads)
1254 1258 return [nullid], [], []
1255 1259
1256 1260 # assume we're closer to the tip than the root
1257 1261 # and start by examining the heads
1258 1262 self.ui.status(_("searching for changes\n"))
1259 1263
1260 1264 unknown = []
1261 1265 for h in heads:
1262 1266 if h not in m:
1263 1267 unknown.append(h)
1264 1268 else:
1265 1269 base[h] = 1
1266 1270
1267 1271 heads = unknown
1268 1272 if not unknown:
1269 1273 return base.keys(), [], []
1270 1274
1271 1275 req = set(unknown)
1272 1276 reqcnt = 0
1273 1277
1274 1278 # search through remote branches
1275 1279 # a 'branch' here is a linear segment of history, with four parts:
1276 1280 # head, root, first parent, second parent
1277 1281 # (a branch always has two parents (or none) by definition)
1278 1282 unknown = remote.branches(unknown)
1279 1283 while unknown:
1280 1284 r = []
1281 1285 while unknown:
1282 1286 n = unknown.pop(0)
1283 1287 if n[0] in seen:
1284 1288 continue
1285 1289
1286 1290 self.ui.debug("examining %s:%s\n"
1287 1291 % (short(n[0]), short(n[1])))
1288 1292 if n[0] == nullid: # found the end of the branch
1289 1293 pass
1290 1294 elif n in seenbranch:
1291 1295 self.ui.debug("branch already found\n")
1292 1296 continue
1293 1297 elif n[1] and n[1] in m: # do we know the base?
1294 1298 self.ui.debug("found incomplete branch %s:%s\n"
1295 1299 % (short(n[0]), short(n[1])))
1296 1300 search.append(n[0:2]) # schedule branch range for scanning
1297 1301 seenbranch.add(n)
1298 1302 else:
1299 1303 if n[1] not in seen and n[1] not in fetch:
1300 1304 if n[2] in m and n[3] in m:
1301 1305 self.ui.debug("found new changeset %s\n" %
1302 1306 short(n[1]))
1303 1307 fetch.add(n[1]) # earliest unknown
1304 1308 for p in n[2:4]:
1305 1309 if p in m:
1306 1310 base[p] = 1 # latest known
1307 1311
1308 1312 for p in n[2:4]:
1309 1313 if p not in req and p not in m:
1310 1314 r.append(p)
1311 1315 req.add(p)
1312 1316 seen.add(n[0])
1313 1317
1314 1318 if r:
1315 1319 reqcnt += 1
1316 1320 self.ui.debug("request %d: %s\n" %
1317 1321 (reqcnt, " ".join(map(short, r))))
1318 1322 for p in xrange(0, len(r), 10):
1319 1323 for b in remote.branches(r[p:p+10]):
1320 1324 self.ui.debug("received %s:%s\n" %
1321 1325 (short(b[0]), short(b[1])))
1322 1326 unknown.append(b)
1323 1327
1324 1328 # do binary search on the branches we found
1325 1329 while search:
1326 1330 newsearch = []
1327 1331 reqcnt += 1
1328 1332 for n, l in zip(search, remote.between(search)):
1329 1333 l.append(n[1])
1330 1334 p = n[0]
1331 1335 f = 1
1332 1336 for i in l:
1333 1337 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1334 1338 if i in m:
1335 1339 if f <= 2:
1336 1340 self.ui.debug("found new branch changeset %s\n" %
1337 1341 short(p))
1338 1342 fetch.add(p)
1339 1343 base[i] = 1
1340 1344 else:
1341 1345 self.ui.debug("narrowed branch search to %s:%s\n"
1342 1346 % (short(p), short(i)))
1343 1347 newsearch.append((p, i))
1344 1348 break
1345 1349 p, f = i, f * 2
1346 1350 search = newsearch
1347 1351
1348 1352 # sanity check our fetch list
1349 1353 for f in fetch:
1350 1354 if f in m:
1351 1355 raise error.RepoError(_("already have changeset ")
1352 1356 + short(f[:4]))
1353 1357
1354 1358 if base.keys() == [nullid]:
1355 1359 if force:
1356 1360 self.ui.warn(_("warning: repository is unrelated\n"))
1357 1361 else:
1358 1362 raise util.Abort(_("repository is unrelated"))
1359 1363
1360 1364 self.ui.debug("found new changesets starting at " +
1361 1365 " ".join([short(f) for f in fetch]) + "\n")
1362 1366
1363 1367 self.ui.debug("%d total queries\n" % reqcnt)
1364 1368
1365 1369 return base.keys(), list(fetch), heads
1366 1370
1367 1371 def findoutgoing(self, remote, base=None, heads=None, force=False):
1368 1372 """Return list of nodes that are roots of subsets not in remote
1369 1373
1370 1374 If base dict is specified, assume that these nodes and their parents
1371 1375 exist on the remote side.
1372 1376 If a list of heads is specified, return only nodes which are heads
1373 1377 or ancestors of these heads, and return a second element which
1374 1378 contains all remote heads which get new children.
1375 1379 """
1376 1380 if base is None:
1377 1381 base = {}
1378 1382 self.findincoming(remote, base, heads, force=force)
1379 1383
1380 1384 self.ui.debug("common changesets up to "
1381 1385 + " ".join(map(short, base.keys())) + "\n")
1382 1386
1383 1387 remain = set(self.changelog.nodemap)
1384 1388
1385 1389 # prune everything remote has from the tree
1386 1390 remain.remove(nullid)
1387 1391 remove = base.keys()
1388 1392 while remove:
1389 1393 n = remove.pop(0)
1390 1394 if n in remain:
1391 1395 remain.remove(n)
1392 1396 for p in self.changelog.parents(n):
1393 1397 remove.append(p)
1394 1398
1395 1399 # find every node whose parents have been pruned
1396 1400 subset = []
1397 1401 # find every remote head that will get new children
1398 1402 updated_heads = set()
1399 1403 for n in remain:
1400 1404 p1, p2 = self.changelog.parents(n)
1401 1405 if p1 not in remain and p2 not in remain:
1402 1406 subset.append(n)
1403 1407 if heads:
1404 1408 if p1 in heads:
1405 1409 updated_heads.add(p1)
1406 1410 if p2 in heads:
1407 1411 updated_heads.add(p2)
1408 1412
1409 1413 # this is the set of all roots we have to push
1410 1414 if heads:
1411 1415 return subset, list(updated_heads)
1412 1416 else:
1413 1417 return subset
1414 1418
1415 1419 def pull(self, remote, heads=None, force=False):
1416 1420 lock = self.lock()
1417 1421 try:
1418 1422 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1419 1423 force=force)
1420 1424 if fetch == [nullid]:
1421 1425 self.ui.status(_("requesting all changes\n"))
1422 1426
1423 1427 if not fetch:
1424 1428 self.ui.status(_("no changes found\n"))
1425 1429 return 0
1426 1430
1427 1431 if heads is None and remote.capable('changegroupsubset'):
1428 1432 heads = rheads
1429 1433
1430 1434 if heads is None:
1431 1435 cg = remote.changegroup(fetch, 'pull')
1432 1436 else:
1433 1437 if not remote.capable('changegroupsubset'):
1434 1438 raise util.Abort(_("Partial pull cannot be done because "
1435 1439 "other repository doesn't support "
1436 1440 "changegroupsubset."))
1437 1441 cg = remote.changegroupsubset(fetch, heads, 'pull')
1438 1442 return self.addchangegroup(cg, 'pull', remote.url())
1439 1443 finally:
1440 1444 lock.release()
1441 1445
1442 1446 def push(self, remote, force=False, revs=None):
1443 1447 # there are two ways to push to remote repo:
1444 1448 #
1445 1449 # addchangegroup assumes local user can lock remote
1446 1450 # repo (local filesystem, old ssh servers).
1447 1451 #
1448 1452 # unbundle assumes local user cannot lock remote repo (new ssh
1449 1453 # servers, http servers).
1450 1454
1451 1455 if remote.capable('unbundle'):
1452 1456 return self.push_unbundle(remote, force, revs)
1453 1457 return self.push_addchangegroup(remote, force, revs)
1454 1458
1455 1459 def prepush(self, remote, force, revs):
1456 1460 '''Analyze the local and remote repositories and determine which
1457 1461 changesets need to be pushed to the remote. Return a tuple
1458 1462 (changegroup, remoteheads). changegroup is a readable file-like
1459 1463 object whose read() returns successive changegroup chunks ready to
1460 1464 be sent over the wire. remoteheads is the list of remote heads.
1461 1465 '''
1462 1466 common = {}
1463 1467 remote_heads = remote.heads()
1464 1468 inc = self.findincoming(remote, common, remote_heads, force=force)
1465 1469
1466 1470 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1467 1471 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1468 1472
1469 1473 def checkbranch(lheads, rheads, updatelb):
1470 1474 '''
1471 1475 check whether there are more local heads than remote heads on
1472 1476 a specific branch.
1473 1477
1474 1478 lheads: local branch heads
1475 1479 rheads: remote branch heads
1476 1480 updatelb: outgoing local branch bases
1477 1481 '''
1478 1482
1479 1483 warn = 0
1480 1484
1481 1485 if not revs and len(lheads) > len(rheads):
1482 1486 warn = 1
1483 1487 else:
1484 1488 # add local heads involved in the push
1485 1489 updatelheads = [self.changelog.heads(x, lheads)
1486 1490 for x in updatelb]
1487 1491 newheads = set(sum(updatelheads, [])) & set(lheads)
1488 1492
1489 1493 if not newheads:
1490 1494 return True
1491 1495
1492 1496 # add heads we don't have or that are not involved in the push
1493 1497 for r in rheads:
1494 1498 if r in self.changelog.nodemap:
1495 1499 desc = self.changelog.heads(r, heads)
1496 1500 l = [h for h in heads if h in desc]
1497 1501 if not l:
1498 1502 newheads.add(r)
1499 1503 else:
1500 1504 newheads.add(r)
1501 1505 if len(newheads) > len(rheads):
1502 1506 warn = 1
1503 1507
1504 1508 if warn:
1505 1509 if not rheads: # new branch requires --force
1506 1510 self.ui.warn(_("abort: push creates new"
1507 1511 " remote branch '%s'!\n") %
1508 1512 self[lheads[0]].branch())
1509 1513 else:
1510 1514 self.ui.warn(_("abort: push creates new remote heads!\n"))
1511 1515
1512 1516 self.ui.status(_("(did you forget to merge?"
1513 1517 " use push -f to force)\n"))
1514 1518 return False
1515 1519 return True
1516 1520
1517 1521 if not bases:
1518 1522 self.ui.status(_("no changes found\n"))
1519 1523 return None, 1
1520 1524 elif not force:
1521 1525 # Check for each named branch if we're creating new remote heads.
1522 1526 # To be a remote head after push, node must be either:
1523 1527 # - unknown locally
1524 1528 # - a local outgoing head descended from update
1525 1529 # - a remote head that's known locally and not
1526 1530 # ancestral to an outgoing head
1527 1531 #
1528 1532 # New named branches cannot be created without --force.
1529 1533
1530 1534 if remote_heads != [nullid]:
1531 1535 if remote.capable('branchmap'):
1532 1536 localhds = {}
1533 1537 if not revs:
1534 1538 localhds = self.branchmap()
1535 1539 else:
1536 1540 for n in heads:
1537 1541 branch = self[n].branch()
1538 1542 if branch in localhds:
1539 1543 localhds[branch].append(n)
1540 1544 else:
1541 1545 localhds[branch] = [n]
1542 1546
1543 1547 remotehds = remote.branchmap()
1544 1548
1545 1549 for lh in localhds:
1546 1550 if lh in remotehds:
1547 1551 rheads = remotehds[lh]
1548 1552 else:
1549 1553 rheads = []
1550 1554 lheads = localhds[lh]
1551 1555 if not checkbranch(lheads, rheads, update):
1552 1556 return None, 0
1553 1557 else:
1554 1558 if not checkbranch(heads, remote_heads, update):
1555 1559 return None, 0
1556 1560
1557 1561 if inc:
1558 1562 self.ui.warn(_("note: unsynced remote changes!\n"))
1559 1563
1560 1564
1561 1565 if revs is None:
1562 1566 # use the fast path, no race possible on push
1563 1567 nodes = self.changelog.findmissing(common.keys())
1564 1568 cg = self._changegroup(nodes, 'push')
1565 1569 else:
1566 1570 cg = self.changegroupsubset(update, revs, 'push')
1567 1571 return cg, remote_heads
1568 1572
1569 1573 def push_addchangegroup(self, remote, force, revs):
1570 1574 lock = remote.lock()
1571 1575 try:
1572 1576 ret = self.prepush(remote, force, revs)
1573 1577 if ret[0] is not None:
1574 1578 cg, remote_heads = ret
1575 1579 return remote.addchangegroup(cg, 'push', self.url())
1576 1580 return ret[1]
1577 1581 finally:
1578 1582 lock.release()
1579 1583
1580 1584 def push_unbundle(self, remote, force, revs):
1581 1585 # local repo finds heads on server, finds out what revs it
1582 1586 # must push. once revs transferred, if server finds it has
1583 1587 # different heads (someone else won commit/push race), server
1584 1588 # aborts.
1585 1589
1586 1590 ret = self.prepush(remote, force, revs)
1587 1591 if ret[0] is not None:
1588 1592 cg, remote_heads = ret
1589 1593 if force: remote_heads = ['force']
1590 1594 return remote.unbundle(cg, remote_heads, 'push')
1591 1595 return ret[1]
1592 1596
1593 1597 def changegroupinfo(self, nodes, source):
1594 1598 if self.ui.verbose or source == 'bundle':
1595 1599 self.ui.status(_("%d changesets found\n") % len(nodes))
1596 1600 if self.ui.debugflag:
1597 1601 self.ui.debug("list of changesets:\n")
1598 1602 for node in nodes:
1599 1603 self.ui.debug("%s\n" % hex(node))
1600 1604
1601 1605 def changegroupsubset(self, bases, heads, source, extranodes=None):
1602 1606 """Compute a changegroup consisting of all the nodes that are
1603 1607 descendants of any of the bases and ancestors of any of the heads.
1604 1608 Return a chunkbuffer object whose read() method will return
1605 1609 successive changegroup chunks.
1606 1610
1607 1611 It is fairly complex as determining which filenodes and which
1608 1612 manifest nodes need to be included for the changeset to be complete
1609 1613 is non-trivial.
1610 1614
1611 1615 Another wrinkle is doing the reverse, figuring out which changeset in
1612 1616 the changegroup a particular filenode or manifestnode belongs to.
1613 1617
1614 1618 The caller can specify some nodes that must be included in the
1615 1619 changegroup using the extranodes argument. It should be a dict
1616 1620 where the keys are the filenames (or 1 for the manifest), and the
1617 1621 values are lists of (node, linknode) tuples, where node is a wanted
1618 1622 node and linknode is the changelog node that should be transmitted as
1619 1623 the linkrev.
1620 1624 """
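        # An illustrative example of the extranodes shape described above
        # (the names here are made up for illustration, not taken from the
        # original source):
        #
        #   extranodes = {
        #       1: [(manifestnode, linknode)],    # 1 stands for the manifest
        #       'foo.c': [(filenode, linknode)],  # per-filename entries
        #   }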

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1]  # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)
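        # Worked illustration (an editor's note, not original commentary):
        # if the recipient is known to have node D in a chain
        # A -> B -> C -> D, the walk above adds C, B and A to hasset as
        # well, and the final loop drops all four from the missing set.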

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode that
            # the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
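    # A minimal usage sketch (illustrative; 'basenode', 'headnode' and 'out'
    # are made-up names, not part of the original source): the returned
    # chunkbuffer is consumed through read() until it is exhausted.
    #
    #   cg = repo.changegroupsubset([basenode], [headnode], 'bundle')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       out.write(chunk)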

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
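        # Illustrative reading of the convention above (the numbers are an
        # editor's examples, not from the original source): going from 3
        # heads to 5 returns +3, going from 3 heads to 2 returns -2, and
        # adding changesets without changing the head count returns 1, so
        # a successful pull never returns 0.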
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
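        # Wire format as parsed below (an editor's summary of this code,
        # not original commentary): after the status line comes a line of
        # the form "<filecount> <bytecount>", then for each file a header
        # line "<name>\0<size>" followed by exactly <size> bytes of raw
        # store data.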
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
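    # Usage note (illustrative, drawn from the code above): clone(peer,
    # stream=True) attempts a streaming clone and quietly falls back to a
    # regular pull when specific heads were requested or the server does
    # not advertise the 'stream' capability.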

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
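# For illustration (an editor's assumption, not original commentary):
# transaction setup passes rename pairs such as [('journal', 'undo')], so
# the returned callback turns the active journal into the undo file once
# the transaction completes.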

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True