commit: trade O(n^2) file checks for O(n^2) dir checks
Matt Mackall
r8710:bcb6e5be default
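The hunk below rewrites the explicit-pattern check in commit(): the sorted file list consulted via bisect becomes a set plus a per-directory prefix scan. As a rough illustration (a minimal standalone sketch, not the Mercurial source itself), the names explicit, changed, and vdirs are hypothetical stand-ins for match.files(), changes[0] + changes[1] + changes[2], and the visited-directory list:

import bisect

def check_with_sorted_list(explicit, changed, vdirs):
    # old approach: list membership is O(n) per file -> O(n^2) file checks,
    # but each directory check is an O(log n) bisect into the sorted list
    files = sorted(changed)
    for f in explicit:
        if f == '.' or f in files:          # O(n) list scan per file
            continue
        if f in vdirs:                      # f names a visited directory
            d = f + '/'
            i = bisect.bisect(files, d)     # O(log n) per directory
            if i >= len(files) or not files[i].startswith(d):
                raise ValueError("no match under directory: %s" % f)

def check_with_set(explicit, changed, vdirs):
    # new approach: set membership is O(1) per file,
    # but each directory check is an O(n) prefix scan -> O(n^2) dir checks
    matched = set(changed)
    for f in explicit:
        if f == '.' or f in matched:        # O(1) hash lookup per file
            continue
        if f in vdirs:
            d = f + '/'
            if not any(mf.startswith(d) for mf in matched):
                raise ValueError("no match under directory: %s" % f)

With n explicit files, list membership costs O(n) per file (the O(n^2) file checks of the commit message), while the set makes that O(1); the price is an O(n) prefix scan for each visited directory (the O(n^2) dir checks). Since explicitly named directories are typically far rarer than explicitly named files, this is presumably the cheaper end of the trade.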
@@ -1,2152 +1,2154 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 from lock import release
17 17 import weakref, stat, errno, os, time, inspect, bisect
18 18 propertycache = util.propertycache
19 19
20 20 class localrepository(repo.repository):
21 21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 22 supported = set('revlogv1 store fncache'.split())
23 23
24 24 def __init__(self, baseui, path=None, create=0):
25 25 repo.repository.__init__(self)
26 26 self.root = os.path.realpath(path)
27 27 self.path = os.path.join(self.root, ".hg")
28 28 self.origroot = path
29 29 self.opener = util.opener(self.path)
30 30 self.wopener = util.opener(self.root)
31 31
32 32 if not os.path.isdir(self.path):
33 33 if create:
34 34 if not os.path.exists(path):
35 35 os.mkdir(path)
36 36 os.mkdir(self.path)
37 37 requirements = ["revlogv1"]
38 38 if baseui.configbool('format', 'usestore', True):
39 39 os.mkdir(os.path.join(self.path, "store"))
40 40 requirements.append("store")
41 41 if baseui.configbool('format', 'usefncache', True):
42 42 requirements.append("fncache")
43 43 # create an invalid changelog
44 44 self.opener("00changelog.i", "a").write(
45 45 '\0\0\0\2' # represents revlogv2
46 46 ' dummy changelog to prevent using the old repo layout'
47 47 )
48 48 reqfile = self.opener("requires", "w")
49 49 for r in requirements:
50 50 reqfile.write("%s\n" % r)
51 51 reqfile.close()
52 52 else:
53 53 raise error.RepoError(_("repository %s not found") % path)
54 54 elif create:
55 55 raise error.RepoError(_("repository %s already exists") % path)
56 56 else:
57 57 # find requirements
58 58 requirements = set()
59 59 try:
60 60 requirements = set(self.opener("requires").read().splitlines())
61 61 except IOError, inst:
62 62 if inst.errno != errno.ENOENT:
63 63 raise
64 64 for r in requirements - self.supported:
65 65 raise error.RepoError(_("requirement '%s' not supported") % r)
66 66
67 67 self.store = store.store(requirements, self.path, util.opener)
68 68 self.spath = self.store.path
69 69 self.sopener = self.store.opener
70 70 self.sjoin = self.store.join
71 71 self.opener.createmode = self.store.createmode
72 72
73 73 self.baseui = baseui
74 74 self.ui = baseui.copy()
75 75 try:
76 76 self.ui.readconfig(self.join("hgrc"), self.root)
77 77 extensions.loadall(self.ui)
78 78 except IOError:
79 79 pass
80 80
81 81 self.tagscache = None
82 82 self._tagstypecache = None
83 83 self.branchcache = None
84 84 self._ubranchcache = None # UTF-8 version of branchcache
85 85 self._branchcachetip = None
86 86 self.nodetagscache = None
87 87 self.filterpats = {}
88 88 self._datafilters = {}
89 89 self._transref = self._lockref = self._wlockref = None
90 90
91 91 @propertycache
92 92 def changelog(self):
93 93 c = changelog.changelog(self.sopener)
94 94 if 'HG_PENDING' in os.environ:
95 95 p = os.environ['HG_PENDING']
96 96 if p.startswith(self.root):
97 97 c.readpending('00changelog.i.a')
98 98 self.sopener.defversion = c.version
99 99 return c
100 100
101 101 @propertycache
102 102 def manifest(self):
103 103 return manifest.manifest(self.sopener)
104 104
105 105 @propertycache
106 106 def dirstate(self):
107 107 return dirstate.dirstate(self.opener, self.ui, self.root)
108 108
109 109 def __getitem__(self, changeid):
110 110 if changeid is None:
111 111 return context.workingctx(self)
112 112 return context.changectx(self, changeid)
113 113
114 114 def __nonzero__(self):
115 115 return True
116 116
117 117 def __len__(self):
118 118 return len(self.changelog)
119 119
120 120 def __iter__(self):
121 121 for i in xrange(len(self)):
122 122 yield i
123 123
124 124 def url(self):
125 125 return 'file:' + self.root
126 126
127 127 def hook(self, name, throw=False, **args):
128 128 return hook.hook(self.ui, self, name, throw, **args)
129 129
130 130 tag_disallowed = ':\r\n'
131 131
132 132 def _tag(self, names, node, message, local, user, date, extra={}):
133 133 if isinstance(names, str):
134 134 allchars = names
135 135 names = (names,)
136 136 else:
137 137 allchars = ''.join(names)
138 138 for c in self.tag_disallowed:
139 139 if c in allchars:
140 140 raise util.Abort(_('%r cannot be used in a tag name') % c)
141 141
142 142 for name in names:
143 143 self.hook('pretag', throw=True, node=hex(node), tag=name,
144 144 local=local)
145 145
146 146 def writetags(fp, names, munge, prevtags):
147 147 fp.seek(0, 2)
148 148 if prevtags and prevtags[-1] != '\n':
149 149 fp.write('\n')
150 150 for name in names:
151 151 m = munge and munge(name) or name
152 152 if self._tagstypecache and name in self._tagstypecache:
153 153 old = self.tagscache.get(name, nullid)
154 154 fp.write('%s %s\n' % (hex(old), m))
155 155 fp.write('%s %s\n' % (hex(node), m))
156 156 fp.close()
157 157
158 158 prevtags = ''
159 159 if local:
160 160 try:
161 161 fp = self.opener('localtags', 'r+')
162 162 except IOError:
163 163 fp = self.opener('localtags', 'a')
164 164 else:
165 165 prevtags = fp.read()
166 166
167 167 # local tags are stored in the current charset
168 168 writetags(fp, names, None, prevtags)
169 169 for name in names:
170 170 self.hook('tag', node=hex(node), tag=name, local=local)
171 171 return
172 172
173 173 try:
174 174 fp = self.wfile('.hgtags', 'rb+')
175 175 except IOError:
176 176 fp = self.wfile('.hgtags', 'ab')
177 177 else:
178 178 prevtags = fp.read()
179 179
180 180 # committed tags are stored in UTF-8
181 181 writetags(fp, names, encoding.fromlocal, prevtags)
182 182
183 183 if '.hgtags' not in self.dirstate:
184 184 self.add(['.hgtags'])
185 185
186 186 m = match_.exact(self.root, '', ['.hgtags'])
187 187 tagnode = self.commit(message, user, date, extra=extra, match=m)
188 188
189 189 for name in names:
190 190 self.hook('tag', node=hex(node), tag=name, local=local)
191 191
192 192 return tagnode
193 193
194 194 def tag(self, names, node, message, local, user, date):
195 195 '''tag a revision with one or more symbolic names.
196 196
197 197 names is a list of strings or, when adding a single tag, names may be a
198 198 string.
199 199
200 200 if local is True, the tags are stored in a per-repository file.
201 201 otherwise, they are stored in the .hgtags file, and a new
202 202 changeset is committed with the change.
203 203
204 204 keyword arguments:
205 205
206 206 local: whether to store tags in non-version-controlled file
207 207 (default False)
208 208
209 209 message: commit message to use if committing
210 210
211 211 user: name of user to use if committing
212 212
213 213 date: date tuple to use if committing'''
214 214
215 215 for x in self.status()[:5]:
216 216 if '.hgtags' in x:
217 217 raise util.Abort(_('working copy of .hgtags is changed '
218 218 '(please commit .hgtags manually)'))
219 219
220 220 self.tags() # instantiate the cache
221 221 self._tag(names, node, message, local, user, date)
222 222
223 223 def tags(self):
224 224 '''return a mapping of tag to node'''
225 225 if self.tagscache:
226 226 return self.tagscache
227 227
228 228 globaltags = {}
229 229 tagtypes = {}
230 230
231 231 def readtags(lines, fn, tagtype):
232 232 filetags = {}
233 233 count = 0
234 234
235 235 def warn(msg):
236 236 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
237 237
238 238 for l in lines:
239 239 count += 1
240 240 if not l:
241 241 continue
242 242 s = l.split(" ", 1)
243 243 if len(s) != 2:
244 244 warn(_("cannot parse entry"))
245 245 continue
246 246 node, key = s
247 247 key = encoding.tolocal(key.strip()) # stored in UTF-8
248 248 try:
249 249 bin_n = bin(node)
250 250 except TypeError:
251 251 warn(_("node '%s' is not well formed") % node)
252 252 continue
253 253 if bin_n not in self.changelog.nodemap:
254 254 warn(_("tag '%s' refers to unknown node") % key)
255 255 continue
256 256
257 257 h = []
258 258 if key in filetags:
259 259 n, h = filetags[key]
260 260 h.append(n)
261 261 filetags[key] = (bin_n, h)
262 262
263 263 for k, nh in filetags.iteritems():
264 264 if k not in globaltags:
265 265 globaltags[k] = nh
266 266 tagtypes[k] = tagtype
267 267 continue
268 268
269 269 # we prefer the global tag if:
270 270 # it supersedes us OR
271 271 # mutual supersedes and it has a higher rank
272 272 # otherwise we win because we're tip-most
273 273 an, ah = nh
274 274 bn, bh = globaltags[k]
275 275 if (bn != an and an in bh and
276 276 (bn not in ah or len(bh) > len(ah))):
277 277 an = bn
278 278 ah.extend([n for n in bh if n not in ah])
279 279 globaltags[k] = an, ah
280 280 tagtypes[k] = tagtype
281 281
282 282 # read the tags file from each head, ending with the tip
283 283 f = None
284 284 for rev, node, fnode in self._hgtagsnodes():
285 285 f = (f and f.filectx(fnode) or
286 286 self.filectx('.hgtags', fileid=fnode))
287 287 readtags(f.data().splitlines(), f, "global")
288 288
289 289 try:
290 290 data = encoding.fromlocal(self.opener("localtags").read())
291 291 # localtags are stored in the local character set
292 292 # while the internal tag table is stored in UTF-8
293 293 readtags(data.splitlines(), "localtags", "local")
294 294 except IOError:
295 295 pass
296 296
297 297 self.tagscache = {}
298 298 self._tagstypecache = {}
299 299 for k, nh in globaltags.iteritems():
300 300 n = nh[0]
301 301 if n != nullid:
302 302 self.tagscache[k] = n
303 303 self._tagstypecache[k] = tagtypes[k]
304 304 self.tagscache['tip'] = self.changelog.tip()
305 305 return self.tagscache
306 306
307 307 def tagtype(self, tagname):
308 308 '''
309 309 return the type of the given tag. result can be:
310 310
311 311 'local' : a local tag
312 312 'global' : a global tag
313 313 None : tag does not exist
314 314 '''
315 315
316 316 self.tags()
317 317
318 318 return self._tagstypecache.get(tagname)
319 319
320 320 def _hgtagsnodes(self):
321 321 last = {}
322 322 ret = []
323 323 for node in reversed(self.heads()):
324 324 c = self[node]
325 325 rev = c.rev()
326 326 try:
327 327 fnode = c.filenode('.hgtags')
328 328 except error.LookupError:
329 329 continue
330 330 ret.append((rev, node, fnode))
331 331 if fnode in last:
332 332 ret[last[fnode]] = None
333 333 last[fnode] = len(ret) - 1
334 334 return [item for item in ret if item]
335 335
336 336 def tagslist(self):
337 337 '''return a list of tags ordered by revision'''
338 338 l = []
339 339 for t, n in self.tags().iteritems():
340 340 try:
341 341 r = self.changelog.rev(n)
342 342 except:
343 343 r = -2 # sort to the beginning of the list if unknown
344 344 l.append((r, t, n))
345 345 return [(t, n) for r, t, n in sorted(l)]
346 346
347 347 def nodetags(self, node):
348 348 '''return the tags associated with a node'''
349 349 if not self.nodetagscache:
350 350 self.nodetagscache = {}
351 351 for t, n in self.tags().iteritems():
352 352 self.nodetagscache.setdefault(n, []).append(t)
353 353 return self.nodetagscache.get(node, [])
354 354
355 355 def _branchtags(self, partial, lrev):
356 356 # TODO: rename this function?
357 357 tiprev = len(self) - 1
358 358 if lrev != tiprev:
359 359 self._updatebranchcache(partial, lrev+1, tiprev+1)
360 360 self._writebranchcache(partial, self.changelog.tip(), tiprev)
361 361
362 362 return partial
363 363
364 364 def branchmap(self):
365 365 tip = self.changelog.tip()
366 366 if self.branchcache is not None and self._branchcachetip == tip:
367 367 return self.branchcache
368 368
369 369 oldtip = self._branchcachetip
370 370 self._branchcachetip = tip
371 371 if self.branchcache is None:
372 372 self.branchcache = {} # avoid recursion in changectx
373 373 else:
374 374 self.branchcache.clear() # keep using the same dict
375 375 if oldtip is None or oldtip not in self.changelog.nodemap:
376 376 partial, last, lrev = self._readbranchcache()
377 377 else:
378 378 lrev = self.changelog.rev(oldtip)
379 379 partial = self._ubranchcache
380 380
381 381 self._branchtags(partial, lrev)
382 382 # this private cache holds all heads (not just tips)
383 383 self._ubranchcache = partial
384 384
385 385 # the branch cache is stored on disk as UTF-8, but in the local
386 386 # charset internally
387 387 for k, v in partial.iteritems():
388 388 self.branchcache[encoding.tolocal(k)] = v
389 389 return self.branchcache
390 390
391 391
392 392 def branchtags(self):
393 393 '''return a dict where branch names map to the tipmost head of
394 394 the branch; open heads come before closed'''
395 395 bt = {}
396 396 for bn, heads in self.branchmap().iteritems():
397 397 head = None
398 398 for i in range(len(heads)-1, -1, -1):
399 399 h = heads[i]
400 400 if 'close' not in self.changelog.read(h)[5]:
401 401 head = h
402 402 break
403 403 # no open heads were found
404 404 if head is None:
405 405 head = heads[-1]
406 406 bt[bn] = head
407 407 return bt
408 408
409 409
410 410 def _readbranchcache(self):
411 411 partial = {}
412 412 try:
413 413 f = self.opener("branchheads.cache")
414 414 lines = f.read().split('\n')
415 415 f.close()
416 416 except (IOError, OSError):
417 417 return {}, nullid, nullrev
418 418
419 419 try:
420 420 last, lrev = lines.pop(0).split(" ", 1)
421 421 last, lrev = bin(last), int(lrev)
422 422 if lrev >= len(self) or self[lrev].node() != last:
423 423 # invalidate the cache
424 424 raise ValueError('invalidating branch cache (tip differs)')
425 425 for l in lines:
426 426 if not l: continue
427 427 node, label = l.split(" ", 1)
428 428 partial.setdefault(label.strip(), []).append(bin(node))
429 429 except KeyboardInterrupt:
430 430 raise
431 431 except Exception, inst:
432 432 if self.ui.debugflag:
433 433 self.ui.warn(str(inst), '\n')
434 434 partial, last, lrev = {}, nullid, nullrev
435 435 return partial, last, lrev
436 436
437 437 def _writebranchcache(self, branches, tip, tiprev):
438 438 try:
439 439 f = self.opener("branchheads.cache", "w", atomictemp=True)
440 440 f.write("%s %s\n" % (hex(tip), tiprev))
441 441 for label, nodes in branches.iteritems():
442 442 for node in nodes:
443 443 f.write("%s %s\n" % (hex(node), label))
444 444 f.rename()
445 445 except (IOError, OSError):
446 446 pass
447 447
448 448 def _updatebranchcache(self, partial, start, end):
449 449 for r in xrange(start, end):
450 450 c = self[r]
451 451 b = c.branch()
452 452 bheads = partial.setdefault(b, [])
453 453 bheads.append(c.node())
454 454 for p in c.parents():
455 455 pn = p.node()
456 456 if pn in bheads:
457 457 bheads.remove(pn)
458 458
459 459 def lookup(self, key):
460 460 if isinstance(key, int):
461 461 return self.changelog.node(key)
462 462 elif key == '.':
463 463 return self.dirstate.parents()[0]
464 464 elif key == 'null':
465 465 return nullid
466 466 elif key == 'tip':
467 467 return self.changelog.tip()
468 468 n = self.changelog._match(key)
469 469 if n:
470 470 return n
471 471 if key in self.tags():
472 472 return self.tags()[key]
473 473 if key in self.branchtags():
474 474 return self.branchtags()[key]
475 475 n = self.changelog._partialmatch(key)
476 476 if n:
477 477 return n
478 478
479 479 # can't find key, check if it might have come from damaged dirstate
480 480 if key in self.dirstate.parents():
481 481 raise error.Abort(_("working directory has unknown parent '%s'!")
482 482 % short(key))
483 483 try:
484 484 if len(key) == 20:
485 485 key = hex(key)
486 486 except:
487 487 pass
488 488 raise error.RepoError(_("unknown revision '%s'") % key)
489 489
490 490 def local(self):
491 491 return True
492 492
493 493 def join(self, f):
494 494 return os.path.join(self.path, f)
495 495
496 496 def wjoin(self, f):
497 497 return os.path.join(self.root, f)
498 498
499 499 def rjoin(self, f):
500 500 return os.path.join(self.root, util.pconvert(f))
501 501
502 502 def file(self, f):
503 503 if f[0] == '/':
504 504 f = f[1:]
505 505 return filelog.filelog(self.sopener, f)
506 506
507 507 def changectx(self, changeid):
508 508 return self[changeid]
509 509
510 510 def parents(self, changeid=None):
511 511 '''get list of changectxs for parents of changeid'''
512 512 return self[changeid].parents()
513 513
514 514 def filectx(self, path, changeid=None, fileid=None):
515 515 """changeid can be a changeset revision, node, or tag.
516 516 fileid can be a file revision or node."""
517 517 return context.filectx(self, path, changeid, fileid)
518 518
519 519 def getcwd(self):
520 520 return self.dirstate.getcwd()
521 521
522 522 def pathto(self, f, cwd=None):
523 523 return self.dirstate.pathto(f, cwd)
524 524
525 525 def wfile(self, f, mode='r'):
526 526 return self.wopener(f, mode)
527 527
528 528 def _link(self, f):
529 529 return os.path.islink(self.wjoin(f))
530 530
531 531 def _filter(self, filter, filename, data):
532 532 if filter not in self.filterpats:
533 533 l = []
534 534 for pat, cmd in self.ui.configitems(filter):
535 535 if cmd == '!':
536 536 continue
537 537 mf = match_.match(self.root, '', [pat])
538 538 fn = None
539 539 params = cmd
540 540 for name, filterfn in self._datafilters.iteritems():
541 541 if cmd.startswith(name):
542 542 fn = filterfn
543 543 params = cmd[len(name):].lstrip()
544 544 break
545 545 if not fn:
546 546 fn = lambda s, c, **kwargs: util.filter(s, c)
547 547 # Wrap old filters not supporting keyword arguments
548 548 if not inspect.getargspec(fn)[2]:
549 549 oldfn = fn
550 550 fn = lambda s, c, **kwargs: oldfn(s, c)
551 551 l.append((mf, fn, params))
552 552 self.filterpats[filter] = l
553 553
554 554 for mf, fn, cmd in self.filterpats[filter]:
555 555 if mf(filename):
556 556 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
557 557 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
558 558 break
559 559
560 560 return data
561 561
562 562 def adddatafilter(self, name, filter):
563 563 self._datafilters[name] = filter
564 564
565 565 def wread(self, filename):
566 566 if self._link(filename):
567 567 data = os.readlink(self.wjoin(filename))
568 568 else:
569 569 data = self.wopener(filename, 'r').read()
570 570 return self._filter("encode", filename, data)
571 571
572 572 def wwrite(self, filename, data, flags):
573 573 data = self._filter("decode", filename, data)
574 574 try:
575 575 os.unlink(self.wjoin(filename))
576 576 except OSError:
577 577 pass
578 578 if 'l' in flags:
579 579 self.wopener.symlink(data, filename)
580 580 else:
581 581 self.wopener(filename, 'w').write(data)
582 582 if 'x' in flags:
583 583 util.set_flags(self.wjoin(filename), False, True)
584 584
585 585 def wwritedata(self, filename, data):
586 586 return self._filter("decode", filename, data)
587 587
588 588 def transaction(self):
589 589 tr = self._transref and self._transref() or None
590 590 if tr and tr.running():
591 591 return tr.nest()
592 592
593 593 # abort here if the journal already exists
594 594 if os.path.exists(self.sjoin("journal")):
595 595 raise error.RepoError(_("journal already exists - run hg recover"))
596 596
597 597 # save dirstate for rollback
598 598 try:
599 599 ds = self.opener("dirstate").read()
600 600 except IOError:
601 601 ds = ""
602 602 self.opener("journal.dirstate", "w").write(ds)
603 603 self.opener("journal.branch", "w").write(self.dirstate.branch())
604 604
605 605 renames = [(self.sjoin("journal"), self.sjoin("undo")),
606 606 (self.join("journal.dirstate"), self.join("undo.dirstate")),
607 607 (self.join("journal.branch"), self.join("undo.branch"))]
608 608 tr = transaction.transaction(self.ui.warn, self.sopener,
609 609 self.sjoin("journal"),
610 610 aftertrans(renames),
611 611 self.store.createmode)
612 612 self._transref = weakref.ref(tr)
613 613 return tr
614 614
615 615 def recover(self):
616 616 lock = self.lock()
617 617 try:
618 618 if os.path.exists(self.sjoin("journal")):
619 619 self.ui.status(_("rolling back interrupted transaction\n"))
620 620 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
621 621 self.invalidate()
622 622 return True
623 623 else:
624 624 self.ui.warn(_("no interrupted transaction available\n"))
625 625 return False
626 626 finally:
627 627 lock.release()
628 628
629 629 def rollback(self):
630 630 wlock = lock = None
631 631 try:
632 632 wlock = self.wlock()
633 633 lock = self.lock()
634 634 if os.path.exists(self.sjoin("undo")):
635 635 self.ui.status(_("rolling back last transaction\n"))
636 636 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
637 637 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
638 638 try:
639 639 branch = self.opener("undo.branch").read()
640 640 self.dirstate.setbranch(branch)
641 641 except IOError:
642 642 self.ui.warn(_("Named branch could not be reset, "
643 643 "current branch still is: %s\n")
644 644 % encoding.tolocal(self.dirstate.branch()))
645 645 self.invalidate()
646 646 self.dirstate.invalidate()
647 647 else:
648 648 self.ui.warn(_("no rollback information available\n"))
649 649 finally:
650 650 release(lock, wlock)
651 651
652 652 def invalidate(self):
653 653 for a in "changelog manifest".split():
654 654 if a in self.__dict__:
655 655 delattr(self, a)
656 656 self.tagscache = None
657 657 self._tagstypecache = None
658 658 self.nodetagscache = None
659 659 self.branchcache = None
660 660 self._ubranchcache = None
661 661 self._branchcachetip = None
662 662
663 663 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
664 664 try:
665 665 l = lock.lock(lockname, 0, releasefn, desc=desc)
666 666 except error.LockHeld, inst:
667 667 if not wait:
668 668 raise
669 669 self.ui.warn(_("waiting for lock on %s held by %r\n") %
670 670 (desc, inst.locker))
671 671 # default to 600 seconds timeout
672 672 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
673 673 releasefn, desc=desc)
674 674 if acquirefn:
675 675 acquirefn()
676 676 return l
677 677
678 678 def lock(self, wait=True):
679 679 l = self._lockref and self._lockref()
680 680 if l is not None and l.held:
681 681 l.lock()
682 682 return l
683 683
684 684 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
685 685 _('repository %s') % self.origroot)
686 686 self._lockref = weakref.ref(l)
687 687 return l
688 688
689 689 def wlock(self, wait=True):
690 690 l = self._wlockref and self._wlockref()
691 691 if l is not None and l.held:
692 692 l.lock()
693 693 return l
694 694
695 695 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
696 696 self.dirstate.invalidate, _('working directory of %s') %
697 697 self.origroot)
698 698 self._wlockref = weakref.ref(l)
699 699 return l
700 700
701 701 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
702 702 """
703 703 commit an individual file as part of a larger transaction
704 704 """
705 705
706 706 fname = fctx.path()
707 707 text = fctx.data()
708 708 flog = self.file(fname)
709 709 fparent1 = manifest1.get(fname, nullid)
710 710 fparent2 = fparent2o = manifest2.get(fname, nullid)
711 711
712 712 meta = {}
713 713 copy = fctx.renamed()
714 714 if copy and copy[0] != fname:
715 715 # Mark the new revision of this file as a copy of another
716 716 # file. This copy data will effectively act as a parent
717 717 # of this new revision. If this is a merge, the first
718 718 # parent will be the nullid (meaning "look up the copy data")
719 719 # and the second one will be the other parent. For example:
720 720 #
721 721 # 0 --- 1 --- 3 rev1 changes file foo
722 722 # \ / rev2 renames foo to bar and changes it
723 723 # \- 2 -/ rev3 should have bar with all changes and
724 724 # should record that bar descends from
725 725 # bar in rev2 and foo in rev1
726 726 #
727 727 # this allows this merge to succeed:
728 728 #
729 729 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
730 730 # \ / merging rev3 and rev4 should use bar@rev2
731 731 # \- 2 --- 4 as the merge base
732 732 #
733 733
734 734 cfname = copy[0]
735 735 crev = manifest1.get(cfname)
736 736 newfparent = fparent2
737 737
738 738 if manifest2: # branch merge
739 739 if fparent2 == nullid or crev is None: # copied on remote side
740 740 if cfname in manifest2:
741 741 crev = manifest2[cfname]
742 742 newfparent = fparent1
743 743
744 744 # find source in nearest ancestor if we've lost track
745 745 if not crev:
746 746 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
747 747 (fname, cfname))
748 748 for ancestor in self['.'].ancestors():
749 749 if cfname in ancestor:
750 750 crev = ancestor[cfname].filenode()
751 751 break
752 752
753 753 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
754 754 meta["copy"] = cfname
755 755 meta["copyrev"] = hex(crev)
756 756 fparent1, fparent2 = nullid, newfparent
757 757 elif fparent2 != nullid:
758 758 # is one parent an ancestor of the other?
759 759 fparentancestor = flog.ancestor(fparent1, fparent2)
760 760 if fparentancestor == fparent1:
761 761 fparent1, fparent2 = fparent2, nullid
762 762 elif fparentancestor == fparent2:
763 763 fparent2 = nullid
764 764
765 765 # is the file changed?
766 766 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
767 767 changelist.append(fname)
768 768 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
769 769
770 770 # are just the flags changed during merge?
771 771 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
772 772 changelist.append(fname)
773 773
774 774 return fparent1
775 775
776 776 def commit(self, text="", user=None, date=None, match=None, force=False,
777 777 editor=False, extra={}):
778 778 """Add a new revision to current repository.
779 779
780 780 Revision information is gathered from the working directory,
781 781 match can be used to filter the committed files. If editor is
782 782 supplied, it is called to get a commit message.
783 783 """
784 784
785 785 wlock = self.wlock()
786 786 try:
787 787 p1, p2 = self.dirstate.parents()
788 788
789 789 if (not force and p2 != nullid and match and
790 790 (match.files() or match.anypats())):
791 791 raise util.Abort(_('cannot partially commit a merge '
792 792 '(do not specify files or patterns)'))
793 793
794 794 def fail(f, msg):
795 795 raise util.Abort('%s: %s' % (f, msg))
796 796
797 797 if not match:
798 798 match = match_.always(self.root, '')
799 799
800 800 if not force:
801 801 vdirs = []
802 802 match.dir = vdirs.append
803 803 match.bad = fail
804 804
805 805 changes = self.status(match=match, clean=force)
806 806 if force:
807 807 changes[0].extend(changes[6]) # mq may commit unchanged files
808 808
809 809 # make sure all explicit patterns are matched
810 810 if not force and match.files():
811 files = sorted(changes[0] + changes[1] + changes[2])
811 matched = set(changes[0] + changes[1] + changes[2])
812 812
813 813 for f in match.files():
814 if f == '.' or f in files: # matched
814 if f == '.' or f in matched: # matched
815 815 continue
816 816 if f in changes[3]: # missing
817 817 fail(f, _('file not found!'))
818 818 if f in vdirs: # visited directory
819 819 d = f + '/'
820 i = bisect.bisect(files, d)
821 if i >= len(files) or not files[i].startswith(d):
820 for mf in matched:
821 if mf.startswith(d):
822 break
823 else:
822 824 fail(f, _("no match under directory!"))
823 825 elif f not in self.dirstate:
824 826 fail(f, _("file not tracked!"))
825 827
826 828 if (not force and not extra.get("close") and p2 == nullid
827 829 and not (changes[0] or changes[1] or changes[2])
828 830 and self[None].branch() == self['.'].branch()):
829 831 self.ui.status(_("nothing changed\n"))
830 832 return None
831 833
832 834 ms = merge_.mergestate(self)
833 835 for f in changes[0]:
834 836 if f in ms and ms[f] == 'u':
835 837 raise util.Abort(_("unresolved merge conflicts "
836 838 "(see hg resolve)"))
837 839
838 840 wctx = context.workingctx(self, (p1, p2), text, user, date,
839 841 extra, changes)
840 842 if editor:
841 843 wctx._text = editor(self, wctx)
842 844 ret = self.commitctx(wctx, True)
843 845
844 846 # update dirstate and mergestate
845 847 for f in changes[0] + changes[1]:
846 848 self.dirstate.normal(f)
847 849 for f in changes[2]:
848 850 self.dirstate.forget(f)
849 851 self.dirstate.setparents(ret)
850 852 ms.reset()
851 853
852 854 return ret
853 855
854 856 finally:
855 857 wlock.release()
856 858
857 859 def commitctx(self, ctx, error=False):
858 860 """Add a new revision to current repository.
859 861
860 862 Revision information is passed via the context argument.
861 863 """
862 864
863 865 tr = lock = None
864 866 removed = ctx.removed()
865 867 p1, p2 = ctx.p1(), ctx.p2()
866 868 m1 = p1.manifest().copy()
867 869 m2 = p2.manifest()
868 870 user = ctx.user()
869 871
870 872 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
871 873 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
872 874
873 875 lock = self.lock()
874 876 try:
875 877 tr = self.transaction()
876 878 trp = weakref.proxy(tr)
877 879
878 880 # check in files
879 881 new = {}
880 882 changed = []
881 883 linkrev = len(self)
882 884 for f in sorted(ctx.modified() + ctx.added()):
883 885 self.ui.note(f + "\n")
884 886 try:
885 887 fctx = ctx[f]
886 888 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
887 889 changed)
888 890 m1.set(f, fctx.flags())
889 891 except (OSError, IOError):
890 892 if error:
891 893 self.ui.warn(_("trouble committing %s!\n") % f)
892 894 raise
893 895 else:
894 896 removed.append(f)
895 897
896 898 # update manifest
897 899 m1.update(new)
898 900 removed = [f for f in sorted(removed) if f in m1 or f in m2]
899 901 drop = [f for f in removed if f in m1]
900 902 for f in drop:
901 903 del m1[f]
902 904 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
903 905 p2.manifestnode(), (new, drop))
904 906
905 907 # update changelog
906 908 self.changelog.delayupdate()
907 909 n = self.changelog.add(mn, changed + removed, ctx.description(),
908 910 trp, p1.node(), p2.node(),
909 911 user, ctx.date(), ctx.extra().copy())
910 912 p = lambda: self.changelog.writepending() and self.root or ""
911 913 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
912 914 parent2=xp2, pending=p)
913 915 self.changelog.finalize(trp)
914 916 tr.close()
915 917
916 918 if self.branchcache:
917 919 self.branchtags()
918 920
919 921 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
920 922 return n
921 923 finally:
922 924 del tr
923 925 lock.release()
924 926
925 927 def walk(self, match, node=None):
926 928 '''
927 929 walk recursively through the directory tree or a given
928 930 changeset, finding all files matched by the match
929 931 function
930 932 '''
931 933 return self[node].walk(match)
932 934
933 935 def status(self, node1='.', node2=None, match=None,
934 936 ignored=False, clean=False, unknown=False):
935 937 """return status of files between two nodes or node and working directory
936 938
937 939 If node1 is None, use the first dirstate parent instead.
938 940 If node2 is None, compare node1 with working directory.
939 941 """
940 942
941 943 def mfmatches(ctx):
942 944 mf = ctx.manifest().copy()
943 945 for fn in mf.keys():
944 946 if not match(fn):
945 947 del mf[fn]
946 948 return mf
947 949
948 950 if isinstance(node1, context.changectx):
949 951 ctx1 = node1
950 952 else:
951 953 ctx1 = self[node1]
952 954 if isinstance(node2, context.changectx):
953 955 ctx2 = node2
954 956 else:
955 957 ctx2 = self[node2]
956 958
957 959 working = ctx2.rev() is None
958 960 parentworking = working and ctx1 == self['.']
959 961 match = match or match_.always(self.root, self.getcwd())
960 962 listignored, listclean, listunknown = ignored, clean, unknown
961 963
962 964 # load earliest manifest first for caching reasons
963 965 if not working and ctx2.rev() < ctx1.rev():
964 966 ctx2.manifest()
965 967
966 968 if not parentworking:
967 969 def bad(f, msg):
968 970 if f not in ctx1:
969 971 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
970 972 match.bad = bad
971 973
972 974 if working: # we need to scan the working dir
973 975 s = self.dirstate.status(match, listignored, listclean, listunknown)
974 976 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
975 977
976 978 # check for any possibly clean files
977 979 if parentworking and cmp:
978 980 fixup = []
979 981 # do a full compare of any files that might have changed
980 982 for f in sorted(cmp):
981 983 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
982 984 or ctx1[f].cmp(ctx2[f].data())):
983 985 modified.append(f)
984 986 else:
985 987 fixup.append(f)
986 988
987 989 if listclean:
988 990 clean += fixup
989 991
990 992 # update dirstate for files that are actually clean
991 993 if fixup:
992 994 try:
993 995 # updating the dirstate is optional
994 996 # so we don't wait on the lock
995 997 wlock = self.wlock(False)
996 998 try:
997 999 for f in fixup:
998 1000 self.dirstate.normal(f)
999 1001 finally:
1000 1002 wlock.release()
1001 1003 except error.LockError:
1002 1004 pass
1003 1005
1004 1006 if not parentworking:
1005 1007 mf1 = mfmatches(ctx1)
1006 1008 if working:
1007 1009 # we are comparing working dir against non-parent
1008 1010 # generate a pseudo-manifest for the working dir
1009 1011 mf2 = mfmatches(self['.'])
1010 1012 for f in cmp + modified + added:
1011 1013 mf2[f] = None
1012 1014 mf2.set(f, ctx2.flags(f))
1013 1015 for f in removed:
1014 1016 if f in mf2:
1015 1017 del mf2[f]
1016 1018 else:
1017 1019 # we are comparing two revisions
1018 1020 deleted, unknown, ignored = [], [], []
1019 1021 mf2 = mfmatches(ctx2)
1020 1022
1021 1023 modified, added, clean = [], [], []
1022 1024 for fn in mf2:
1023 1025 if fn in mf1:
1024 1026 if (mf1.flags(fn) != mf2.flags(fn) or
1025 1027 (mf1[fn] != mf2[fn] and
1026 1028 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1027 1029 modified.append(fn)
1028 1030 elif listclean:
1029 1031 clean.append(fn)
1030 1032 del mf1[fn]
1031 1033 else:
1032 1034 added.append(fn)
1033 1035 removed = mf1.keys()
1034 1036
1035 1037 r = modified, added, removed, deleted, unknown, ignored, clean
1036 1038 [l.sort() for l in r]
1037 1039 return r
1038 1040
1039 1041 def add(self, list):
1040 1042 wlock = self.wlock()
1041 1043 try:
1042 1044 rejected = []
1043 1045 for f in list:
1044 1046 p = self.wjoin(f)
1045 1047 try:
1046 1048 st = os.lstat(p)
1047 1049 except:
1048 1050 self.ui.warn(_("%s does not exist!\n") % f)
1049 1051 rejected.append(f)
1050 1052 continue
1051 1053 if st.st_size > 10000000:
1052 1054 self.ui.warn(_("%s: files over 10MB may cause memory and"
1053 1055 " performance problems\n"
1054 1056 "(use 'hg revert %s' to unadd the file)\n")
1055 1057 % (f, f))
1056 1058 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1057 1059 self.ui.warn(_("%s not added: only files and symlinks "
1058 1060 "supported currently\n") % f)
1059 1061 rejected.append(p)
1060 1062 elif self.dirstate[f] in 'amn':
1061 1063 self.ui.warn(_("%s already tracked!\n") % f)
1062 1064 elif self.dirstate[f] == 'r':
1063 1065 self.dirstate.normallookup(f)
1064 1066 else:
1065 1067 self.dirstate.add(f)
1066 1068 return rejected
1067 1069 finally:
1068 1070 wlock.release()
1069 1071
1070 1072 def forget(self, list):
1071 1073 wlock = self.wlock()
1072 1074 try:
1073 1075 for f in list:
1074 1076 if self.dirstate[f] != 'a':
1075 1077 self.ui.warn(_("%s not added!\n") % f)
1076 1078 else:
1077 1079 self.dirstate.forget(f)
1078 1080 finally:
1079 1081 wlock.release()
1080 1082
1081 1083 def remove(self, list, unlink=False):
1082 1084 if unlink:
1083 1085 for f in list:
1084 1086 try:
1085 1087 util.unlink(self.wjoin(f))
1086 1088 except OSError, inst:
1087 1089 if inst.errno != errno.ENOENT:
1088 1090 raise
1089 1091 wlock = self.wlock()
1090 1092 try:
1091 1093 for f in list:
1092 1094 if unlink and os.path.exists(self.wjoin(f)):
1093 1095 self.ui.warn(_("%s still exists!\n") % f)
1094 1096 elif self.dirstate[f] == 'a':
1095 1097 self.dirstate.forget(f)
1096 1098 elif f not in self.dirstate:
1097 1099 self.ui.warn(_("%s not tracked!\n") % f)
1098 1100 else:
1099 1101 self.dirstate.remove(f)
1100 1102 finally:
1101 1103 wlock.release()
1102 1104
1103 1105 def undelete(self, list):
1104 1106 manifests = [self.manifest.read(self.changelog.read(p)[0])
1105 1107 for p in self.dirstate.parents() if p != nullid]
1106 1108 wlock = self.wlock()
1107 1109 try:
1108 1110 for f in list:
1109 1111 if self.dirstate[f] != 'r':
1110 1112 self.ui.warn(_("%s not removed!\n") % f)
1111 1113 else:
1112 1114 m = f in manifests[0] and manifests[0] or manifests[1]
1113 1115 t = self.file(f).read(m[f])
1114 1116 self.wwrite(f, t, m.flags(f))
1115 1117 self.dirstate.normal(f)
1116 1118 finally:
1117 1119 wlock.release()
1118 1120
1119 1121 def copy(self, source, dest):
1120 1122 p = self.wjoin(dest)
1121 1123 if not (os.path.exists(p) or os.path.islink(p)):
1122 1124 self.ui.warn(_("%s does not exist!\n") % dest)
1123 1125 elif not (os.path.isfile(p) or os.path.islink(p)):
1124 1126 self.ui.warn(_("copy failed: %s is not a file or a "
1125 1127 "symbolic link\n") % dest)
1126 1128 else:
1127 1129 wlock = self.wlock()
1128 1130 try:
1129 1131 if self.dirstate[dest] in '?r':
1130 1132 self.dirstate.add(dest)
1131 1133 self.dirstate.copy(source, dest)
1132 1134 finally:
1133 1135 wlock.release()
1134 1136
1135 1137 def heads(self, start=None, closed=False):
1136 1138 heads = self.changelog.heads(start)
1137 1139 def display(head):
1138 1140 if closed:
1139 1141 return True
1140 1142 extras = self.changelog.read(head)[5]
1141 1143 return ('close' not in extras)
1142 1144 # sort the output in rev descending order
1143 1145 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1144 1146 return [n for (r, n) in sorted(heads)]
1145 1147
1146 1148 def branchheads(self, branch=None, start=None, closed=False):
1147 1149 if branch is None:
1148 1150 branch = self[None].branch()
1149 1151 branches = self.branchmap()
1150 1152 if branch not in branches:
1151 1153 return []
1152 1154 bheads = branches[branch]
1153 1155 # the cache returns heads ordered lowest to highest
1154 1156 bheads.reverse()
1155 1157 if start is not None:
1156 1158 # filter out the heads that cannot be reached from startrev
1157 1159 bheads = self.changelog.nodesbetween([start], bheads)[2]
1158 1160 if not closed:
1159 1161 bheads = [h for h in bheads if
1160 1162 ('close' not in self.changelog.read(h)[5])]
1161 1163 return bheads
1162 1164
1163 1165 def branches(self, nodes):
1164 1166 if not nodes:
1165 1167 nodes = [self.changelog.tip()]
1166 1168 b = []
1167 1169 for n in nodes:
1168 1170 t = n
1169 1171 while 1:
1170 1172 p = self.changelog.parents(n)
1171 1173 if p[1] != nullid or p[0] == nullid:
1172 1174 b.append((t, n, p[0], p[1]))
1173 1175 break
1174 1176 n = p[0]
1175 1177 return b
1176 1178
1177 1179 def between(self, pairs):
1178 1180 r = []
1179 1181
1180 1182 for top, bottom in pairs:
1181 1183 n, l, i = top, [], 0
1182 1184 f = 1
1183 1185
1184 1186 while n != bottom and n != nullid:
1185 1187 p = self.changelog.parents(n)[0]
1186 1188 if i == f:
1187 1189 l.append(n)
1188 1190 f = f * 2
1189 1191 n = p
1190 1192 i += 1
1191 1193
1192 1194 r.append(l)
1193 1195
1194 1196 return r
1195 1197
1196 1198 def findincoming(self, remote, base=None, heads=None, force=False):
1197 1199 """Return list of roots of the subsets of missing nodes from remote
1198 1200
1199 1201 If base dict is specified, assume that these nodes and their parents
1200 1202 exist on the remote side and that no child of a node of base exists
1201 1203 in both remote and self.
1202 1204 Furthermore, base will be updated to include the nodes that exist
1203 1205 in self and remote but none of whose children exist in both self and remote.
1204 1206 If a list of heads is specified, return only nodes which are heads
1205 1207 or ancestors of these heads.
1206 1208
1207 1209 All the ancestors of base are in self and in remote.
1208 1210 All the descendants of the list returned are missing in self.
1209 1211 (and so we know that the rest of the nodes are missing in remote, see
1210 1212 outgoing)
1211 1213 """
1212 1214 return self.findcommonincoming(remote, base, heads, force)[1]
1213 1215
1214 1216 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1215 1217 """Return a tuple (common, missing roots, heads) used to identify
1216 1218 missing nodes from remote.
1217 1219
1218 1220 If base dict is specified, assume that these nodes and their parents
1219 1221 exist on the remote side and that no child of a node of base exists
1220 1222 in both remote and self.
1221 1223 Furthermore, base will be updated to include the nodes that exist
1222 1224 in self and remote but none of whose children exist in both self and remote.
1223 1225 If a list of heads is specified, return only nodes which are heads
1224 1226 or ancestors of these heads.
1225 1227
1226 1228 All the ancestors of base are in self and in remote.
1227 1229 """
1228 1230 m = self.changelog.nodemap
1229 1231 search = []
1230 1232 fetch = set()
1231 1233 seen = set()
1232 1234 seenbranch = set()
1233 1235 if base is None:
1234 1236 base = {}
1235 1237
1236 1238 if not heads:
1237 1239 heads = remote.heads()
1238 1240
1239 1241 if self.changelog.tip() == nullid:
1240 1242 base[nullid] = 1
1241 1243 if heads != [nullid]:
1242 1244 return [nullid], [nullid], list(heads)
1243 1245 return [nullid], [], []
1244 1246
1245 1247 # assume we're closer to the tip than the root
1246 1248 # and start by examining the heads
1247 1249 self.ui.status(_("searching for changes\n"))
1248 1250
1249 1251 unknown = []
1250 1252 for h in heads:
1251 1253 if h not in m:
1252 1254 unknown.append(h)
1253 1255 else:
1254 1256 base[h] = 1
1255 1257
1256 1258 heads = unknown
1257 1259 if not unknown:
1258 1260 return base.keys(), [], []
1259 1261
1260 1262 req = set(unknown)
1261 1263 reqcnt = 0
1262 1264
1263 1265 # search through remote branches
1264 1266 # a 'branch' here is a linear segment of history, with four parts:
1265 1267 # head, root, first parent, second parent
1266 1268 # (a branch always has two parents (or none) by definition)
1267 1269 unknown = remote.branches(unknown)
1268 1270 while unknown:
1269 1271 r = []
1270 1272 while unknown:
1271 1273 n = unknown.pop(0)
1272 1274 if n[0] in seen:
1273 1275 continue
1274 1276
1275 1277 self.ui.debug(_("examining %s:%s\n")
1276 1278 % (short(n[0]), short(n[1])))
1277 1279 if n[0] == nullid: # found the end of the branch
1278 1280 pass
1279 1281 elif n in seenbranch:
1280 1282 self.ui.debug(_("branch already found\n"))
1281 1283 continue
1282 1284 elif n[1] and n[1] in m: # do we know the base?
1283 1285 self.ui.debug(_("found incomplete branch %s:%s\n")
1284 1286 % (short(n[0]), short(n[1])))
1285 1287 search.append(n[0:2]) # schedule branch range for scanning
1286 1288 seenbranch.add(n)
1287 1289 else:
1288 1290 if n[1] not in seen and n[1] not in fetch:
1289 1291 if n[2] in m and n[3] in m:
1290 1292 self.ui.debug(_("found new changeset %s\n") %
1291 1293 short(n[1]))
1292 1294 fetch.add(n[1]) # earliest unknown
1293 1295 for p in n[2:4]:
1294 1296 if p in m:
1295 1297 base[p] = 1 # latest known
1296 1298
1297 1299 for p in n[2:4]:
1298 1300 if p not in req and p not in m:
1299 1301 r.append(p)
1300 1302 req.add(p)
1301 1303 seen.add(n[0])
1302 1304
1303 1305 if r:
1304 1306 reqcnt += 1
1305 1307 self.ui.debug(_("request %d: %s\n") %
1306 1308 (reqcnt, " ".join(map(short, r))))
1307 1309 for p in xrange(0, len(r), 10):
1308 1310 for b in remote.branches(r[p:p+10]):
1309 1311 self.ui.debug(_("received %s:%s\n") %
1310 1312 (short(b[0]), short(b[1])))
1311 1313 unknown.append(b)
1312 1314
1313 1315 # do binary search on the branches we found
1314 1316 while search:
1315 1317 newsearch = []
1316 1318 reqcnt += 1
1317 1319 for n, l in zip(search, remote.between(search)):
1318 1320 l.append(n[1])
1319 1321 p = n[0]
1320 1322 f = 1
1321 1323 for i in l:
1322 1324 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1323 1325 if i in m:
1324 1326 if f <= 2:
1325 1327 self.ui.debug(_("found new branch changeset %s\n") %
1326 1328 short(p))
1327 1329 fetch.add(p)
1328 1330 base[i] = 1
1329 1331 else:
1330 1332 self.ui.debug(_("narrowed branch search to %s:%s\n")
1331 1333 % (short(p), short(i)))
1332 1334 newsearch.append((p, i))
1333 1335 break
1334 1336 p, f = i, f * 2
1335 1337 search = newsearch
1336 1338
1337 1339 # sanity check our fetch list
1338 1340 for f in fetch:
1339 1341 if f in m:
1340 1342 raise error.RepoError(_("already have changeset ")
1341 1343 + short(f[:4]))
1342 1344
1343 1345 if base.keys() == [nullid]:
1344 1346 if force:
1345 1347 self.ui.warn(_("warning: repository is unrelated\n"))
1346 1348 else:
1347 1349 raise util.Abort(_("repository is unrelated"))
1348 1350
1349 1351 self.ui.debug(_("found new changesets starting at ") +
1350 1352 " ".join([short(f) for f in fetch]) + "\n")
1351 1353
1352 1354 self.ui.debug(_("%d total queries\n") % reqcnt)
1353 1355
1354 1356 return base.keys(), list(fetch), heads
1355 1357
1356 1358 def findoutgoing(self, remote, base=None, heads=None, force=False):
1357 1359 """Return list of nodes that are roots of subsets not in remote
1358 1360
1359 1361 If base dict is specified, assume that these nodes and their parents
1360 1362 exist on the remote side.
1361 1363 If a list of heads is specified, return only nodes which are heads
1362 1364 or ancestors of these heads, and return a second element which
1363 1365 contains all remote heads which get new children.
1364 1366 """
1365 1367 if base is None:
1366 1368 base = {}
1367 1369 self.findincoming(remote, base, heads, force=force)
1368 1370
1369 1371 self.ui.debug(_("common changesets up to ")
1370 1372 + " ".join(map(short, base.keys())) + "\n")
1371 1373
1372 1374 remain = set(self.changelog.nodemap)
1373 1375
1374 1376 # prune everything remote has from the tree
1375 1377 remain.remove(nullid)
1376 1378 remove = base.keys()
1377 1379 while remove:
1378 1380 n = remove.pop(0)
1379 1381 if n in remain:
1380 1382 remain.remove(n)
1381 1383 for p in self.changelog.parents(n):
1382 1384 remove.append(p)
1383 1385
1384 1386 # find every node whose parents have been pruned
1385 1387 subset = []
1386 1388 # find every remote head that will get new children
1387 1389 updated_heads = set()
1388 1390 for n in remain:
1389 1391 p1, p2 = self.changelog.parents(n)
1390 1392 if p1 not in remain and p2 not in remain:
1391 1393 subset.append(n)
1392 1394 if heads:
1393 1395 if p1 in heads:
1394 1396 updated_heads.add(p1)
1395 1397 if p2 in heads:
1396 1398 updated_heads.add(p2)
1397 1399
1398 1400 # this is the set of all roots we have to push
1399 1401 if heads:
1400 1402 return subset, list(updated_heads)
1401 1403 else:
1402 1404 return subset
1403 1405
1404 1406 def pull(self, remote, heads=None, force=False):
1405 1407 lock = self.lock()
1406 1408 try:
1407 1409 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1408 1410 force=force)
1409 1411 if fetch == [nullid]:
1410 1412 self.ui.status(_("requesting all changes\n"))
1411 1413
1412 1414 if not fetch:
1413 1415 self.ui.status(_("no changes found\n"))
1414 1416 return 0
1415 1417
1416 1418 if heads is None and remote.capable('changegroupsubset'):
1417 1419 heads = rheads
1418 1420
1419 1421 if heads is None:
1420 1422 cg = remote.changegroup(fetch, 'pull')
1421 1423 else:
1422 1424 if not remote.capable('changegroupsubset'):
1423 1425 raise util.Abort(_("Partial pull cannot be done because "
1424 1426 "other repository doesn't support "
1425 1427 "changegroupsubset."))
1426 1428 cg = remote.changegroupsubset(fetch, heads, 'pull')
1427 1429 return self.addchangegroup(cg, 'pull', remote.url())
1428 1430 finally:
1429 1431 lock.release()
1430 1432
1431 1433 def push(self, remote, force=False, revs=None):
1432 1434 # there are two ways to push to remote repo:
1433 1435 #
1434 1436 # addchangegroup assumes local user can lock remote
1435 1437 # repo (local filesystem, old ssh servers).
1436 1438 #
1437 1439 # unbundle assumes local user cannot lock remote repo (new ssh
1438 1440 # servers, http servers).
1439 1441
1440 1442 if remote.capable('unbundle'):
1441 1443 return self.push_unbundle(remote, force, revs)
1442 1444 return self.push_addchangegroup(remote, force, revs)
1443 1445
1444 1446 def prepush(self, remote, force, revs):
1445 1447 common = {}
1446 1448 remote_heads = remote.heads()
1447 1449 inc = self.findincoming(remote, common, remote_heads, force=force)
1448 1450
1449 1451 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1450 1452 if revs is not None:
1451 1453 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1452 1454 else:
1453 1455 bases, heads = update, self.changelog.heads()
1454 1456
1455 1457 def checkbranch(lheads, rheads, updatelh):
1456 1458 '''
1457 1459 check whether there are more local heads than remote heads on
1458 1460 a specific branch.
1459 1461
1460 1462 lheads: local branch heads
1461 1463 rheads: remote branch heads
1462 1464 updatelh: outgoing local branch heads
1463 1465 '''
1464 1466
1465 1467 warn = 0
1466 1468
1467 1469 if not revs and len(lheads) > len(rheads):
1468 1470 warn = 1
1469 1471 else:
1470 1472 updatelheads = [self.changelog.heads(x, lheads)
1471 1473 for x in updatelh]
1472 1474 newheads = set(sum(updatelheads, [])) & set(lheads)
1473 1475
1474 1476 if not newheads:
1475 1477 return True
1476 1478
1477 1479 for r in rheads:
1478 1480 if r in self.changelog.nodemap:
1479 1481 desc = self.changelog.heads(r, heads)
1480 1482 l = [h for h in heads if h in desc]
1481 1483 if not l:
1482 1484 newheads.add(r)
1483 1485 else:
1484 1486 newheads.add(r)
1485 1487 if len(newheads) > len(rheads):
1486 1488 warn = 1
1487 1489
1488 1490 if warn:
1489 1491 if not rheads: # new branch requires --force
1490 1492 self.ui.warn(_("abort: push creates new"
1491 1493 " remote branch '%s'!\n" %
1492 1494 self[updatelh[0]].branch()))
1493 1495 else:
1494 1496 self.ui.warn(_("abort: push creates new remote heads!\n"))
1495 1497
1496 1498 self.ui.status(_("(did you forget to merge?"
1497 1499 " use push -f to force)\n"))
1498 1500 return False
1499 1501 return True
1500 1502
1501 1503 if not bases:
1502 1504 self.ui.status(_("no changes found\n"))
1503 1505 return None, 1
1504 1506 elif not force:
1505 1507 # Check for each named branch if we're creating new remote heads.
1506 1508 # To be a remote head after push, node must be either:
1507 1509 # - unknown locally
1508 1510 # - a local outgoing head descended from update
1509 1511 # - a remote head that's known locally and not
1510 1512 # ancestral to an outgoing head
1511 1513 #
1512 1514 # New named branches cannot be created without --force.
1513 1515
1514 1516 if remote_heads != [nullid]:
1515 1517 if remote.capable('branchmap'):
1516 1518 localhds = {}
1517 1519 if not revs:
1518 1520 localhds = self.branchmap()
1519 1521 else:
1520 1522 for n in heads:
1521 1523 branch = self[n].branch()
1522 1524 if branch in localhds:
1523 1525 localhds[branch].append(n)
1524 1526 else:
1525 1527 localhds[branch] = [n]
1526 1528
1527 1529 remotehds = remote.branchmap()
1528 1530
1529 1531 for lh in localhds:
1530 1532 if lh in remotehds:
1531 1533 rheads = remotehds[lh]
1532 1534 else:
1533 1535 rheads = []
1534 1536 lheads = localhds[lh]
1535 1537 updatelh = [upd for upd in update
1536 1538 if self[upd].branch() == lh]
1537 1539 if not updatelh:
1538 1540 continue
1539 1541 if not checkbranch(lheads, rheads, updatelh):
1540 1542 return None, 0
1541 1543 else:
1542 1544 if not checkbranch(heads, remote_heads, update):
1543 1545 return None, 0
1544 1546
1545 1547 if inc:
1546 1548 self.ui.warn(_("note: unsynced remote changes!\n"))
1547 1549
1548 1550
1549 1551 if revs is None:
1550 1552 # use the fast path, no race possible on push
1551 1553 cg = self._changegroup(common.keys(), 'push')
1552 1554 else:
1553 1555 cg = self.changegroupsubset(update, revs, 'push')
1554 1556 return cg, remote_heads
1555 1557
1556 1558 def push_addchangegroup(self, remote, force, revs):
1557 1559 lock = remote.lock()
1558 1560 try:
1559 1561 ret = self.prepush(remote, force, revs)
1560 1562 if ret[0] is not None:
1561 1563 cg, remote_heads = ret
1562 1564 return remote.addchangegroup(cg, 'push', self.url())
1563 1565 return ret[1]
1564 1566 finally:
1565 1567 lock.release()
1566 1568
1567 1569 def push_unbundle(self, remote, force, revs):
1568 1570 # local repo finds heads on server, finds out what revs it
1569 1571 # must push. once revs transferred, if server finds it has
1570 1572 # different heads (someone else won commit/push race), server
1571 1573 # aborts.
1572 1574
1573 1575 ret = self.prepush(remote, force, revs)
1574 1576 if ret[0] is not None:
1575 1577 cg, remote_heads = ret
1576 1578 if force: remote_heads = ['force']
1577 1579 return remote.unbundle(cg, remote_heads, 'push')
1578 1580 return ret[1]
1579 1581
1580 1582 def changegroupinfo(self, nodes, source):
1581 1583 if self.ui.verbose or source == 'bundle':
1582 1584 self.ui.status(_("%d changesets found\n") % len(nodes))
1583 1585 if self.ui.debugflag:
1584 1586 self.ui.debug(_("list of changesets:\n"))
1585 1587 for node in nodes:
1586 1588 self.ui.debug("%s\n" % hex(node))
1587 1589
1588 1590 def changegroupsubset(self, bases, heads, source, extranodes=None):
1589 1591 """This function generates a changegroup consisting of all the nodes
1590 1592 that are descendants of any of the bases, and ancestors of any of
1591 1593 the heads.
1592 1594
1593 1595 It is fairly complex as determining which filenodes and which
1594 1596 manifest nodes need to be included for the changeset to be complete
1595 1597 is non-trivial.
1596 1598
1597 1599 Another wrinkle is doing the reverse, figuring out which changeset in
1598 1600 the changegroup a particular filenode or manifestnode belongs to.
1599 1601
1600 1602 The caller can specify some nodes that must be included in the
1601 1603 changegroup using the extranodes argument. It should be a dict
1602 1604 where the keys are the filenames (or 1 for the manifest), and the
1603 1605 values are lists of (node, linknode) tuples, where node is a wanted
1604 1606 node and linknode is the changelog node that should be transmitted as
1605 1607 the linkrev.
1606 1608 """
1607 1609
1608 1610 if extranodes is None:
1609 1611 # can we go through the fast path ?
1610 1612 heads.sort()
1611 1613 allheads = self.heads()
1612 1614 allheads.sort()
1613 1615 if heads == allheads:
1614 1616 common = []
1615 1617 # parents of bases are known from both sides
1616 1618 for n in bases:
1617 1619 for p in self.changelog.parents(n):
1618 1620 if p != nullid:
1619 1621 common.append(p)
1620 1622 return self._changegroup(common, source)
1621 1623
1622 1624 self.hook('preoutgoing', throw=True, source=source)
1623 1625
1624 1626 # Set up some initial variables
1625 1627 # Make it easy to refer to self.changelog
1626 1628 cl = self.changelog
1627 1629 # msng is short for missing - compute the list of changesets in this
1628 1630 # changegroup.
1629 1631 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1630 1632 self.changegroupinfo(msng_cl_lst, source)
1631 1633 # Some bases may turn out to be superfluous, and some heads may be
1632 1634 # too. nodesbetween will return the minimal set of bases and heads
1633 1635 # necessary to re-create the changegroup.
1634 1636
1635 1637 # Known heads are the list of heads that it is assumed the recipient
1636 1638 # of this changegroup will know about.
1637 1639 knownheads = set()
1638 1640 # We assume that all parents of bases are known heads.
1639 1641 for n in bases:
1640 1642 knownheads.update(cl.parents(n))
1641 1643 knownheads.discard(nullid)
1642 1644 knownheads = list(knownheads)
1643 1645 if knownheads:
1644 1646 # Now that we know what heads are known, we can compute which
1645 1647 # changesets are known. The recipient must know about all
1646 1648 # changesets required to reach the known heads from the null
1647 1649 # changeset.
1648 1650 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1649 1651 junk = None
1650 1652 # Transform the list into a set.
1651 1653 has_cl_set = set(has_cl_set)
1652 1654 else:
1653 1655 # If there were no known heads, the recipient cannot be assumed to
1654 1656 # know about any changesets.
1655 1657 has_cl_set = set()
1656 1658
1657 1659 # Make it easy to refer to self.manifest
1658 1660 mnfst = self.manifest
1659 1661 # We don't know which manifests are missing yet
1660 1662 msng_mnfst_set = {}
1661 1663 # Nor do we know which filenodes are missing.
1662 1664 msng_filenode_set = {}
1663 1665
1664 1666 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1665 1667 junk = None
1666 1668
1667 1669 # A changeset always belongs to itself, so the changenode lookup
1668 1670 # function for a changenode is identity.
1669 1671 def identity(x):
1670 1672 return x
1671 1673
1672 1674 # A function generating function. Sets up an environment for the
1673 1675 # inner function.
1674 1676 def cmp_by_rev_func(revlog):
1675 1677 # Compare two nodes by their revision number in the environment's
1676 1678 # revision history. Since the revision number both represents the
1677 1679 # most efficient order to read the nodes in, and represents a
1678 1680 # topological sorting of the nodes, this function is often useful.
1679 1681 def cmp_by_rev(a, b):
1680 1682 return cmp(revlog.rev(a), revlog.rev(b))
1681 1683 return cmp_by_rev
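# Illustrative use (this is exactly how prune_parents and gengroup
# below call it): nodelist.sort(cmp_by_rev_func(revlog)) sorts nodes
# into revision order, which is both the cheapest read order and a
# topological order.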
1682 1684
1683 1685 # If we determine that a particular file or manifest node must be a
1684 1686 # node that the recipient of the changegroup will already have, we can
1685 1687 # also assume the recipient will have all the parents. This function
1686 1688 # prunes them from the set of missing nodes.
1687 1689 def prune_parents(revlog, hasset, msngset):
1688 1690 haslst = list(hasset)
1689 1691 haslst.sort(cmp_by_rev_func(revlog))
1690 1692 for node in haslst:
1691 1693 parentlst = [p for p in revlog.parents(node) if p != nullid]
1692 1694 while parentlst:
1693 1695 n = parentlst.pop()
1694 1696 if n not in hasset:
1695 1697 hasset.add(n)
1696 1698 p = [p for p in revlog.parents(n) if p != nullid]
1697 1699 parentlst.extend(p)
1698 1700 for n in hasset:
1699 1701 msngset.pop(n, None)
1700 1702
1701 1703 # This is a function generating function used to set up an environment
1702 1704 # for the inner function to execute in.
1703 1705 def manifest_and_file_collector(changedfileset):
1704 1706 # This is an information gathering function that gathers
1705 1707 # information from each changeset node that goes out as part of
1706 1708 # the changegroup. The information gathered is a list of which
1707 1709 # manifest nodes are potentially required (the recipient may
1708 1710 # already have them) and the total list of all files which were
1709 1711 # changed in any changeset in the changegroup.
1710 1712 #
1711 1713 # We also remember the first changenode in which we saw each
1712 1714 # manifest referenced, so we can later determine which changenode
1713 1715 # 'owns' the manifest.
1714 1716 def collect_manifests_and_files(clnode):
1715 1717 c = cl.read(clnode)
1716 1718 for f in c[3]:
1717 1719 # This is to make sure we only have one instance of each
1718 1720 # filename string for each filename.
1719 1721 changedfileset.setdefault(f, f)
1720 1722 msng_mnfst_set.setdefault(c[0], clnode)
1721 1723 return collect_manifests_and_files
1722 1724
1723 1725 # Figure out which manifest nodes (of the ones we think might be part
1724 1726 # of the changegroup) the recipient must know about and remove them
1725 1727 # from the changegroup.
1726 1728 def prune_manifests():
1727 1729 has_mnfst_set = set()
1728 1730 for n in msng_mnfst_set:
1729 1731 # If a 'missing' manifest thinks it belongs to a changenode
1730 1732 # the recipient is assumed to have, obviously the recipient
1731 1733 # must have that manifest.
1732 1734 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1733 1735 if linknode in has_cl_set:
1734 1736 has_mnfst_set.add(n)
1735 1737 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1736 1738
1737 1739 # Use the information collected in collect_manifests_and_files to say
1738 1740 # which changenode any manifestnode belongs to.
1739 1741 def lookup_manifest_link(mnfstnode):
1740 1742 return msng_mnfst_set[mnfstnode]
1741 1743
1742 1744 # A function generating function that sets up the initial environment
1743 1745 # for the inner function.
1744 1746 def filenode_collector(changedfiles):
1745 1747 next_rev = [0]
1746 1748 # This gathers information from each manifestnode included in the
1747 1749 # changegroup about which filenodes the manifest node references
1748 1750 # so we can include those in the changegroup too.
1749 1751 #
1750 1752 # It also remembers which changenode each filenode belongs to. It
1751 1753 # does this by assuming that a filenode belongs to the changenode
1752 1754 # that the first manifest referencing it belongs to.
1753 1755 def collect_msng_filenodes(mnfstnode):
1754 1756 r = mnfst.rev(mnfstnode)
1755 1757 if r == next_rev[0]:
1756 1758 # If the last rev we looked at was the one just previous,
1757 1759 # we only need to see a diff.
1758 1760 deltamf = mnfst.readdelta(mnfstnode)
1759 1761 # For each line in the delta
1760 1762 for f, fnode in deltamf.iteritems():
1761 1763 f = changedfiles.get(f, None)
1762 1764 # And if the file is in the list of files we care
1763 1765 # about.
1764 1766 if f is not None:
1765 1767 # Get the changenode this manifest belongs to
1766 1768 clnode = msng_mnfst_set[mnfstnode]
1767 1769 # Create the set of filenodes for the file if
1768 1770 # there isn't one already.
1769 1771 ndset = msng_filenode_set.setdefault(f, {})
1770 1772 # And set the filenode's changelog node to the
1771 1773 # manifest's if it hasn't been set already.
1772 1774 ndset.setdefault(fnode, clnode)
1773 1775 else:
1774 1776 # Otherwise we need a full manifest.
1775 1777 m = mnfst.read(mnfstnode)
1776 1778 # For every file we care about.
1777 1779 for f in changedfiles:
1778 1780 fnode = m.get(f, None)
1779 1781 # If it's in the manifest
1780 1782 if fnode is not None:
1781 1783 # See comments above.
1782 1784 clnode = msng_mnfst_set[mnfstnode]
1783 1785 ndset = msng_filenode_set.setdefault(f, {})
1784 1786 ndset.setdefault(fnode, clnode)
1785 1787 # Remember the revision we hope to see next.
1786 1788 next_rev[0] = r + 1
1787 1789 return collect_msng_filenodes
1788 1790
1789 1791 # We have a list of filenodes we think we need for a file; let's remove
1790 1792 # all those we know the recipient must have.
1791 1793 def prune_filenodes(f, filerevlog):
1792 1794 msngset = msng_filenode_set[f]
1793 1795 hasset = set()
1794 1796 # If a 'missing' filenode thinks it belongs to a changenode we
1795 1797 # assume the recipient must have, then the recipient must have
1796 1798 # that filenode.
1797 1799 for n in msngset:
1798 1800 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1799 1801 if clnode in has_cl_set:
1800 1802 hasset.add(n)
1801 1803 prune_parents(filerevlog, hasset, msngset)
1802 1804
1803 1805 # A function-generating function that sets up a context for the
1804 1806 # inner function.
1805 1807 def lookup_filenode_link_func(fname):
1806 1808 msngset = msng_filenode_set[fname]
1807 1809 # Lookup the changenode the filenode belongs to.
1808 1810 def lookup_filenode_link(fnode):
1809 1811 return msngset[fnode]
1810 1812 return lookup_filenode_link
1811 1813
1812 1814 # Add the nodes that were explicitly requested.
1813 1815 def add_extra_nodes(name, nodes):
1814 1816 if not extranodes or name not in extranodes:
1815 1817 return
1816 1818
1817 1819 for node, linknode in extranodes[name]:
1818 1820 if node not in nodes:
1819 1821 nodes[node] = linknode
1820 1822
1821 1823 # Now that we have all these utility functions to help out and
1822 1824 # logically divide up the task, generate the group.
1823 1825 def gengroup():
1824 1826 # The set of changed files starts empty.
1825 1827 changedfiles = {}
1826 1828 # Create a changenode group generator that will call our functions
1827 1829 # back to lookup the owning changenode and collect information.
1828 1830 group = cl.group(msng_cl_lst, identity,
1829 1831 manifest_and_file_collector(changedfiles))
1830 1832 for chnk in group:
1831 1833 yield chnk
1832 1834
1833 1835 # The list of manifests has been collected by the generator
1834 1836 # calling our functions back.
1835 1837 prune_manifests()
1836 1838 add_extra_nodes(1, msng_mnfst_set)
1837 1839 msng_mnfst_lst = msng_mnfst_set.keys()
1838 1840 # Sort the manifestnodes by revision number.
1839 1841 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1840 1842 # Create a generator for the manifestnodes that calls our lookup
1841 1843 # and data collection functions back.
1842 1844 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1843 1845 filenode_collector(changedfiles))
1844 1846 for chnk in group:
1845 1847 yield chnk
1846 1848
1847 1849 # These are no longer needed, dereference and toss the memory for
1848 1850 # them.
1849 1851 msng_mnfst_lst = None
1850 1852 msng_mnfst_set.clear()
1851 1853
1852 1854 if extranodes:
1853 1855 for fname in extranodes:
1854 1856 if isinstance(fname, int):
1855 1857 continue
1856 1858 msng_filenode_set.setdefault(fname, {})
1857 1859 changedfiles[fname] = 1
1858 1860 # Go through all our files in order sorted by name.
1859 1861 for fname in sorted(changedfiles):
1860 1862 filerevlog = self.file(fname)
1861 1863 if not len(filerevlog):
1862 1864 raise util.Abort(_("empty or missing revlog for %s") % fname)
1863 1865 # Toss out the filenodes that the recipient isn't really
1864 1866 # missing.
1865 1867 if fname in msng_filenode_set:
1866 1868 prune_filenodes(fname, filerevlog)
1867 1869 add_extra_nodes(fname, msng_filenode_set[fname])
1868 1870 msng_filenode_lst = msng_filenode_set[fname].keys()
1869 1871 else:
1870 1872 msng_filenode_lst = []
1871 1873 # If any filenodes are left, generate the group for them,
1872 1874 # otherwise don't bother.
1873 1875 if len(msng_filenode_lst) > 0:
1874 1876 yield changegroup.chunkheader(len(fname))
1875 1877 yield fname
1876 1878 # Sort the filenodes by their revision number.
1877 1879 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1878 1880 # Create a group generator and only pass in a changenode
1879 1881 # lookup function as we need to collect no information
1880 1882 # from filenodes.
1881 1883 group = filerevlog.group(msng_filenode_lst,
1882 1884 lookup_filenode_link_func(fname))
1883 1885 for chnk in group:
1884 1886 yield chnk
1885 1887 if fname in msng_filenode_set:
1886 1888 # Don't need this anymore, toss it to free memory.
1887 1889 del msng_filenode_set[fname]
1888 1890 # Signal that no more groups are left.
1889 1891 yield changegroup.closechunk()
1890 1892
1891 1893 if msng_cl_lst:
1892 1894 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1893 1895
1894 1896 return util.chunkbuffer(gengroup())
1895 1897
1896 1898 def changegroup(self, basenodes, source):
1897 1899 # to avoid a race we use changegroupsubset() (issue1320)
1898 1900 return self.changegroupsubset(basenodes, self.heads(), source)
1899 1901
1900 1902 def _changegroup(self, common, source):
1901 1903 """Generate a changegroup of all nodes that we have that a recipient
1902 1904 doesn't.
1903 1905
1904 1906 This is much easier than the previous function as we can assume that
1905 1907 the recipient already has every changenode we aren't sending.
1906 1908
1907 1909 common is the set of common nodes between remote and self"""
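# A minimal consumer sketch (hypothetical caller; util.chunkbuffer
# exposes a read(size) method):
#   cg = repo._changegroup(common, 'bundle')
#   chunk = cg.read(4096)
#   while chunk:
#       handle(chunk)  # hypothetical handler for the raw bytes
#       chunk = cg.read(4096)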
1908 1910
1909 1911 self.hook('preoutgoing', throw=True, source=source)
1910 1912
1911 1913 cl = self.changelog
1912 1914 nodes = cl.findmissing(common)
1913 1915 revset = set([cl.rev(n) for n in nodes])
1914 1916 self.changegroupinfo(nodes, source)
1915 1917
1916 1918 def identity(x):
1917 1919 return x
1918 1920
1919 1921 def gennodelst(log):
1920 1922 for r in log:
1921 1923 if log.linkrev(r) in revset:
1922 1924 yield log.node(r)
1923 1925
1924 1926 def changed_file_collector(changedfileset):
1925 1927 def collect_changed_files(clnode):
1926 1928 c = cl.read(clnode)
1927 1929 changedfileset.update(c[3])
1928 1930 return collect_changed_files
1929 1931
1930 1932 def lookuprevlink_func(revlog):
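# Descriptive note: rev() maps a node to its local revision number,
# linkrev() maps that to the changelog revision that introduced it,
# and cl.node() turns the changelog revision back into a node.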
1931 1933 def lookuprevlink(n):
1932 1934 return cl.node(revlog.linkrev(revlog.rev(n)))
1933 1935 return lookuprevlink
1934 1936
1935 1937 def gengroup():
1936 1938 # construct a list of all changed files
1937 1939 changedfiles = set()
1938 1940
1939 1941 for chnk in cl.group(nodes, identity,
1940 1942 changed_file_collector(changedfiles)):
1941 1943 yield chnk
1942 1944
1943 1945 mnfst = self.manifest
1944 1946 nodeiter = gennodelst(mnfst)
1945 1947 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1946 1948 yield chnk
1947 1949
1948 1950 for fname in sorted(changedfiles):
1949 1951 filerevlog = self.file(fname)
1950 1952 if not len(filerevlog):
1951 1953 raise util.Abort(_("empty or missing revlog for %s") % fname)
1952 1954 nodeiter = gennodelst(filerevlog)
1953 1955 nodeiter = list(nodeiter)
1954 1956 if nodeiter:
1955 1957 yield changegroup.chunkheader(len(fname))
1956 1958 yield fname
1957 1959 lookup = lookuprevlink_func(filerevlog)
1958 1960 for chnk in filerevlog.group(nodeiter, lookup):
1959 1961 yield chnk
1960 1962
1961 1963 yield changegroup.closechunk()
1962 1964
1963 1965 if nodes:
1964 1966 self.hook('outgoing', node=hex(nodes[0]), source=source)
1965 1967
1966 1968 return util.chunkbuffer(gengroup())
1967 1969
1968 1970 def addchangegroup(self, source, srctype, url, emptyok=False):
1969 1971 """add changegroup to repo.
1970 1972
1971 1973 return values:
1972 1974 - nothing changed or no source: 0
1973 1975 - more heads than before: 1+added heads (2..n)
1974 1976 - fewer heads than before: -1-removed heads (-2..-n)
1975 1977 - number of heads stays the same: 1
1976 1978 """
1977 1979 def csmap(x):
1978 1980 self.ui.debug(_("add changeset %s\n") % short(x))
1979 1981 return len(cl)
1980 1982
1981 1983 def revmap(x):
1982 1984 return cl.rev(x)
1983 1985
1984 1986 if not source:
1985 1987 return 0
1986 1988
1987 1989 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1988 1990
1989 1991 changesets = files = revisions = 0
1990 1992
1991 1993 # write changelog data to temp files so concurrent readers will not see
1992 1994 # an inconsistent view
1993 1995 cl = self.changelog
1994 1996 cl.delayupdate()
1995 1997 oldheads = len(cl.heads())
1996 1998
1997 1999 tr = self.transaction()
1998 2000 try:
1999 2001 trp = weakref.proxy(tr)
2000 2002 # pull off the changeset group
2001 2003 self.ui.status(_("adding changesets\n"))
2002 2004 clstart = len(cl)
2003 2005 chunkiter = changegroup.chunkiter(source)
2004 2006 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2005 2007 raise util.Abort(_("received changelog group is empty"))
2006 2008 clend = len(cl)
2007 2009 changesets = clend - clstart
2008 2010
2009 2011 # pull off the manifest group
2010 2012 self.ui.status(_("adding manifests\n"))
2011 2013 chunkiter = changegroup.chunkiter(source)
2012 2014 # no need to check for empty manifest group here:
2013 2015 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2014 2016 # no new manifest will be created and the manifest group will
2015 2017 # be empty during the pull
2016 2018 self.manifest.addgroup(chunkiter, revmap, trp)
2017 2019
2018 2020 # process the files
2019 2021 self.ui.status(_("adding file changes\n"))
2020 2022 while True:
2021 2023 f = changegroup.getchunk(source)
2022 2024 if not f:
2023 2025 break
2024 2026 self.ui.debug(_("adding %s revisions\n") % f)
2025 2027 fl = self.file(f)
2026 2028 o = len(fl)
2027 2029 chunkiter = changegroup.chunkiter(source)
2028 2030 if fl.addgroup(chunkiter, revmap, trp) is None:
2029 2031 raise util.Abort(_("received file revlog group is empty"))
2030 2032 revisions += len(fl) - o
2031 2033 files += 1
2032 2034
2033 2035 newheads = len(cl.heads())
2034 2036 heads = ""
2035 2037 if oldheads and newheads != oldheads:
2036 2038 heads = _(" (%+d heads)") % (newheads - oldheads)
2037 2039
2038 2040 self.ui.status(_("added %d changesets"
2039 2041 " with %d changes to %d files%s\n")
2040 2042 % (changesets, revisions, files, heads))
2041 2043
2042 2044 if changesets > 0:
2043 2045 p = lambda: cl.writepending() and self.root or ""
2044 2046 self.hook('pretxnchangegroup', throw=True,
2045 2047 node=hex(cl.node(clstart)), source=srctype,
2046 2048 url=url, pending=p)
2047 2049
2048 2050 # make changelog see real files again
2049 2051 cl.finalize(trp)
2050 2052
2051 2053 tr.close()
2052 2054 finally:
2053 2055 del tr
2054 2056
2055 2057 if changesets > 0:
2056 2058 # forcefully update the on-disk branch cache
2057 2059 self.ui.debug(_("updating the branch cache\n"))
2058 2060 self.branchtags()
2059 2061 self.hook("changegroup", node=hex(cl.node(clstart)),
2060 2062 source=srctype, url=url)
2061 2063
2062 2064 for i in xrange(clstart, clend):
2063 2065 self.hook("incoming", node=hex(cl.node(i)),
2064 2066 source=srctype, url=url)
2065 2067
2066 2068 # never return 0 here:
2067 2069 if newheads < oldheads:
2068 2070 return newheads - oldheads - 1
2069 2071 else:
2070 2072 return newheads - oldheads + 1
2071 2073
2072 2074
2073 2075 def stream_in(self, remote):
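# Stream layout as parsed below (descriptive summary):
#   <resp>\n                      -- integer status code
#   <total_files> <total_bytes>\n -- overall transfer size
# then, for each file:
#   <name>\0<size>\n              -- filename and byte count
# followed by exactly <size> bytes of raw file data.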
2074 2076 fp = remote.stream_out()
2075 2077 l = fp.readline()
2076 2078 try:
2077 2079 resp = int(l)
2078 2080 except ValueError:
2079 2081 raise error.ResponseError(
2080 2082 _('Unexpected response from remote server:'), l)
2081 2083 if resp == 1:
2082 2084 raise util.Abort(_('operation forbidden by server'))
2083 2085 elif resp == 2:
2084 2086 raise util.Abort(_('locking the remote repository failed'))
2085 2087 elif resp != 0:
2086 2088 raise util.Abort(_('the server sent an unknown error code'))
2087 2089 self.ui.status(_('streaming all changes\n'))
2088 2090 l = fp.readline()
2089 2091 try:
2090 2092 total_files, total_bytes = map(int, l.split(' ', 1))
2091 2093 except (ValueError, TypeError):
2092 2094 raise error.ResponseError(
2093 2095 _('Unexpected response from remote server:'), l)
2094 2096 self.ui.status(_('%d files to transfer, %s of data\n') %
2095 2097 (total_files, util.bytecount(total_bytes)))
2096 2098 start = time.time()
2097 2099 for i in xrange(total_files):
2098 2100 # XXX doesn't support '\n' or '\r' in filenames
2099 2101 l = fp.readline()
2100 2102 try:
2101 2103 name, size = l.split('\0', 1)
2102 2104 size = int(size)
2103 2105 except (ValueError, TypeError):
2104 2106 raise error.ResponseError(
2105 2107 _('Unexpected response from remote server:'), l)
2106 2108 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2107 2109 # for backwards compat, name was partially encoded
2108 2110 ofp = self.sopener(store.decodedir(name), 'w')
2109 2111 for chunk in util.filechunkiter(fp, limit=size):
2110 2112 ofp.write(chunk)
2111 2113 ofp.close()
2112 2114 elapsed = time.time() - start
2113 2115 if elapsed <= 0:
2114 2116 elapsed = 0.001
2115 2117 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2116 2118 (util.bytecount(total_bytes), elapsed,
2117 2119 util.bytecount(total_bytes / elapsed)))
2118 2120 self.invalidate()
2119 2121 return len(self.heads()) + 1
2120 2122
2121 2123 def clone(self, remote, heads=[], stream=False):
2122 2124 '''clone remote repository.
2123 2125
2124 2126 keyword arguments:
2125 2127 heads: list of revs to clone (forces use of pull)
2126 2128 stream: use streaming clone if possible'''
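# Illustrative behaviour: clone(remote, stream=True) attempts a
# streaming clone only when no specific heads are requested and the
# remote advertises the 'stream' capability; otherwise it falls back
# to a regular pull.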
2127 2129
2128 2130 # now, all clients that can request uncompressed clones can
2129 2131 # read repo formats supported by all servers that can serve
2130 2132 # them.
2131 2133
2132 2134 # if revlog format changes, client will have to check version
2133 2135 # and format flags on "stream" capability, and use
2134 2136 # uncompressed only if compatible.
2135 2137
2136 2138 if stream and not heads and remote.capable('stream'):
2137 2139 return self.stream_in(remote)
2138 2140 return self.pull(remote, heads)
2139 2141
2140 2142 # used to avoid circular references so destructors work
2141 2143 def aftertrans(files):
2142 2144 renamefiles = [tuple(t) for t in files]
2143 2145 def a():
2144 2146 for src, dest in renamefiles:
2145 2147 util.rename(src, dest)
2146 2148 return a
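# Illustrative use (hypothetical file names): a transaction created
# with aftertrans([('journal', 'undo')]) as its post-close callback
# renames the journal file to undo when the transaction completes.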
2147 2149
2148 2150 def instance(ui, path, create):
2149 2151 return localrepository(ui, util.drop_scheme('file', path), create)
2150 2152
2151 2153 def islocal(path):
2152 2154 return True