##// END OF EJS Templates
addchangegroup: add option to skip check for empty changelog group
Alexis S. L. Carvalho -
r5907:afb7bdf1 default
parent child Browse files
Show More
@@ -1,2034 +1,2034 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
    def __init__(self, parentui, path=None, create=0):
        """Open (or create) the local repository rooted at *path*.

        parentui: ui object whose configuration this repo inherits.
        create:   when true, initialize a fresh repository on disk.
        Raises repo.RepoError when the repo is missing, already exists,
        or declares a requirement we do not support.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog so that pre-"store" versions
                # of Mercurial refuse to read this repo layout
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements (a missing file just means an old repo)
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: filenames are encoded only in "store" layout repos
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath),
                                          self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        # caches, filled lazily by the accessors below
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self._transref = self._lockref = self._wlockref = None
87 87
    def __getattr__(self, name):
        """Lazily construct the heavyweight attributes on first access.

        Each branch assigns the real object onto self, so __getattr__
        fires at most once per attribute.
        """
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch the changelog first so sopener.defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
102 102
103 103 def url(self):
104 104 return 'file:' + self.root
105 105
106 106 def hook(self, name, throw=False, **args):
107 107 return hook.hook(self.ui, self, name, throw, **args)
108 108
109 109 tag_disallowed = ':\r\n'
110 110
    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        """Record *name* as a tag for *node*.

        local:  store in the uncommitted 'localtags' file instead of
                committing to .hgtags.
        parent: commit the .hgtags change on top of this revision instead
                of the dirstate parent (rawcommit-style).
        Returns the node of the tagging commit, or None for local tags.
        """
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            # make sure we append on a fresh line
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            # seed the working copy of .hgtags from the given parent
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
169 169
170 170 def tag(self, name, node, message, local, user, date):
171 171 '''tag a revision with a symbolic name.
172 172
173 173 if local is True, the tag is stored in a per-repository file.
174 174 otherwise, it is stored in the .hgtags file, and a new
175 175 changeset is committed with the change.
176 176
177 177 keyword arguments:
178 178
179 179 local: whether to store tag in non-version-controlled file
180 180 (default False)
181 181
182 182 message: commit message to use if committing
183 183
184 184 user: name of user to use if committing
185 185
186 186 date: date tuple to use if committing'''
187 187
188 188 for x in self.status()[:5]:
189 189 if '.hgtags' in x:
190 190 raise util.Abort(_('working copy of .hgtags is changed '
191 191 '(please commit .hgtags manually)'))
192 192
193 193
194 194 self._tag(name, node, message, local, user, date)
195 195
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse one tags file; merge its entries into globaltags,
            # resolving conflicts by supercession (see comment below)
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h accumulates the older nodes this tag pointed at
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # a tag mapped to nullid has been deleted; drop it
            if n != nullid:
                self.tagscache[k] = n
                self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
280 280
281 281 def tagtype(self, tagname):
282 282 '''
283 283 return the type of the given tag. result can be:
284 284
285 285 'local' : a local tag
286 286 'global' : a global tag
287 287 None : tag does not exist
288 288 '''
289 289
290 290 self.tags()
291 291
292 292 return self._tagstypecache.get(tagname)
293 293
    def _hgtagsnodes(self):
        """Return [(rev, node, hgtags-filenode)] for each head, tip last.

        Heads whose .hgtags filenode duplicates a later head's are dropped,
        so each distinct .hgtags revision is read only once.
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # null out the earlier duplicate; filtered below
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
311 311
312 312 def tagslist(self):
313 313 '''return a list of tags ordered by revision'''
314 314 l = []
315 315 for t, n in self.tags().items():
316 316 try:
317 317 r = self.changelog.rev(n)
318 318 except:
319 319 r = -2 # sort to the beginning of the list if unknown
320 320 l.append((r, t, n))
321 321 l.sort()
322 322 return [(t, n) for r, t, n in l]
323 323
324 324 def nodetags(self, node):
325 325 '''return the tags associated with a node'''
326 326 if not self.nodetagscache:
327 327 self.nodetagscache = {}
328 328 for t, n in self.tags().items():
329 329 self.nodetagscache.setdefault(n, []).append(t)
330 330 return self.nodetagscache.get(node, [])
331 331
    def _branchtags(self):
        """Return {branchname(UTF-8): tipnode}, refreshing the disk cache.

        Reads branch.cache, extends it with any revisions added since it
        was written, and writes the updated cache back.
        """
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
341 341
342 342 def branchtags(self):
343 343 if self.branchcache is not None:
344 344 return self.branchcache
345 345
346 346 self.branchcache = {} # avoid recursion in changectx
347 347 partial = self._branchtags()
348 348
349 349 # the branch cache is stored on disk as UTF-8, but in the local
350 350 # charset internally
351 351 for k, v in partial.items():
352 352 self.branchcache[util.tolocal(k)] = v
353 353 return self.branchcache
354 354
    def _readbranchcache(self):
        """Parse branch.cache; return (branchmap, tipnode, tiprev).

        Any problem reading or validating the cache yields an empty
        result ({}, nullid, nullrev) so callers simply rebuild it.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line is "<tiphex> <tiprev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any malformed cache is discarded, not fatal
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
382 382
383 383 def _writebranchcache(self, branches, tip, tiprev):
384 384 try:
385 385 f = self.opener("branch.cache", "w", atomictemp=True)
386 386 f.write("%s %s\n" % (hex(tip), tiprev))
387 387 for label, node in branches.iteritems():
388 388 f.write("%s %s\n" % (hex(node), label))
389 389 f.rename()
390 390 except (IOError, OSError):
391 391 pass
392 392
393 393 def _updatebranchcache(self, partial, start, end):
394 394 for r in xrange(start, end):
395 395 c = self.changectx(r)
396 396 b = c.branch()
397 397 partial[b] = c.node()
398 398
    def lookup(self, key):
        """Resolve *key* to a changelog node.

        Resolution order: '.' (first dirstate parent), 'null', exact
        rev/node match, tag, branch name, unambiguous node prefix.
        Raises repo.RepoError when nothing matches.
        """
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)
425 425
426 426 def dev(self):
427 427 return os.lstat(self.path).st_dev
428 428
429 429 def local(self):
430 430 return True
431 431
432 432 def join(self, f):
433 433 return os.path.join(self.path, f)
434 434
435 435 def sjoin(self, f):
436 436 f = self.encodefn(f)
437 437 return os.path.join(self.spath, f)
438 438
439 439 def wjoin(self, f):
440 440 return os.path.join(self.root, f)
441 441
442 442 def file(self, f):
443 443 if f[0] == '/':
444 444 f = f[1:]
445 445 return filelog.filelog(self.sopener, f)
446 446
447 447 def changectx(self, changeid=None):
448 448 return context.changectx(self, changeid)
449 449
450 450 def workingctx(self):
451 451 return context.workingctx(self)
452 452
453 453 def parents(self, changeid=None):
454 454 '''
455 455 get list of changectxs for parents of changeid or working directory
456 456 '''
457 457 if changeid is None:
458 458 pl = self.dirstate.parents()
459 459 else:
460 460 n = self.changelog.lookup(changeid)
461 461 pl = self.changelog.parents(n)
462 462 if pl[1] == nullid:
463 463 return [self.changectx(pl[0])]
464 464 return [self.changectx(pl[0]), self.changectx(pl[1])]
465 465
466 466 def filectx(self, path, changeid=None, fileid=None):
467 467 """changeid can be a changeset revision, node, or tag.
468 468 fileid can be a file revision or node."""
469 469 return context.filectx(self, path, changeid, fileid)
470 470
471 471 def getcwd(self):
472 472 return self.dirstate.getcwd()
473 473
474 474 def pathto(self, f, cwd=None):
475 475 return self.dirstate.pathto(f, cwd)
476 476
477 477 def wfile(self, f, mode='r'):
478 478 return self.wopener(f, mode)
479 479
480 480 def _link(self, f):
481 481 return os.path.islink(self.wjoin(f))
482 482
    def _filter(self, filter, filename, data):
        """Run *data* through the first matching [encode]/[decode] filter.

        filter is the config section name ('encode' or 'decode' here);
        compiled pattern matchers are cached per section in filterpats.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter applies
                break

        return data
498 498
499 499 def wread(self, filename):
500 500 if self._link(filename):
501 501 data = os.readlink(self.wjoin(filename))
502 502 else:
503 503 data = self.wopener(filename, 'r').read()
504 504 return self._filter("encode", filename, data)
505 505
    def wwrite(self, filename, data, flags):
        """Write *data* (after decode filters) to the working directory.

        flags are the manifest flags ('l'/'x') applied via set_flags.
        """
        data = self._filter("decode", filename, data)
        # remove any existing file first so a symlink is replaced by a
        # regular file; a missing file is fine
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)
514 514
515 515 def wwritedata(self, filename, data):
516 516 return self._filter("decode", filename, data)
517 517
    def transaction(self):
        """Start (or nest into) a store transaction.

        Saves the dirstate and branch so a later rollback can restore
        them; on close, journal files are renamed to undo files.
        """
        # nest into an existing live transaction if there is one
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on successful close, journal.* become undo.* for rollback
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self._transref = weakref.ref(tr)
        return tr
542 542
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True when a journal was found and rolled back,
        False otherwise.
        """
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l
556 556
557 557 def rollback(self):
558 558 wlock = lock = None
559 559 try:
560 560 wlock = self.wlock()
561 561 lock = self.lock()
562 562 if os.path.exists(self.sjoin("undo")):
563 563 self.ui.status(_("rolling back last transaction\n"))
564 564 transaction.rollback(self.sopener, self.sjoin("undo"))
565 565 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
566 566 branch = self.opener("undo.branch").read()
567 567 self.dirstate.setbranch(branch)
568 568 self.invalidate()
569 569 self.dirstate.invalidate()
570 570 else:
571 571 self.ui.warn(_("no rollback information available\n"))
572 572 finally:
573 573 del lock, wlock
574 574
575 575 def invalidate(self):
576 576 for a in "changelog manifest".split():
577 577 if hasattr(self, a):
578 578 self.__delattr__(a)
579 579 self.tagscache = None
580 580 self._tagstypecache = None
581 581 self.nodetagscache = None
582 582
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire *lockname*; retry with a timeout when held and wait=True.

        releasefn/acquirefn are callbacks run on release/acquisition;
        desc is a human-readable description for messages.
        """
        try:
            # first try without blocking
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
597 597
598 598 def lock(self, wait=True):
599 599 if self._lockref and self._lockref():
600 600 return self._lockref()
601 601
602 602 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
603 603 _('repository %s') % self.origroot)
604 604 self._lockref = weakref.ref(l)
605 605 return l
606 606
607 607 def wlock(self, wait=True):
608 608 if self._wlockref and self._wlockref():
609 609 return self._wlockref()
610 610
611 611 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
612 612 self.dirstate.invalidate, _('working directory of %s') %
613 613 self.origroot)
614 614 self._wlockref = weakref.ref(l)
615 615 return l
616 616
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fn:        working-directory name of the file
        manifest1/manifest2: parent manifests (manifest2 empty if no merge)
        linkrev:   changelog revision this file revision will link to
        tr:        the active transaction (proxy)
        changelist: list to append fn to when a new revision is written
        Returns the resulting file node.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
678 678
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        """Commit *files* without consulting the dirstate for changes.

        Defaults the parents to the dirstate parents; empty changesets
        are allowed (empty_ok).  Returns the new changeset node.
        """
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)
684 684
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Commit changes to the repository; return the new node or None.

        When p1 is None the dirstate drives the commit (normal path);
        otherwise this behaves like rawcommit and uses *files* verbatim.
        force_editor spawns the editor even when text is given; empty_ok
        allows an empty commit message / changeset.
        """
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            # collect the files to commit/remove
            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    # round-trip to validate the branch name encoding
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    # in rawcommit mode a vanished file becomes a removal;
                    # in dirstate mode it is a hard error
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                # strip trailing whitespace and leading blank lines
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    raise util.Abort(_("empty commit message"))
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock
862 862
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was asked for but not in the rev
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
904 904
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean); ignored/clean are populated only when requested.
        """

        def fcmp(fn, getnode):
            # true when working-dir contents differ from the stored file
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of *node* restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                # don't block: a missed fixup is harmless
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1016 1016
1017 1017 def add(self, list):
1018 1018 wlock = self.wlock()
1019 1019 try:
1020 1020 rejected = []
1021 1021 for f in list:
1022 1022 p = self.wjoin(f)
1023 1023 try:
1024 1024 st = os.lstat(p)
1025 1025 except:
1026 1026 self.ui.warn(_("%s does not exist!\n") % f)
1027 1027 rejected.append(f)
1028 1028 continue
1029 1029 if st.st_size > 10000000:
1030 1030 self.ui.warn(_("%s: files over 10MB may cause memory and"
1031 1031 " performance problems\n"
1032 1032 "(use 'hg revert %s' to unadd the file)\n")
1033 1033 % (f, f))
1034 1034 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1035 1035 self.ui.warn(_("%s not added: only files and symlinks "
1036 1036 "supported currently\n") % f)
1037 1037 rejected.append(p)
1038 1038 elif self.dirstate[f] in 'amn':
1039 1039 self.ui.warn(_("%s already tracked!\n") % f)
1040 1040 elif self.dirstate[f] == 'r':
1041 1041 self.dirstate.normallookup(f)
1042 1042 else:
1043 1043 self.dirstate.add(f)
1044 1044 return rejected
1045 1045 finally:
1046 1046 del wlock
1047 1047
1048 1048 def forget(self, list):
1049 1049 wlock = self.wlock()
1050 1050 try:
1051 1051 for f in list:
1052 1052 if self.dirstate[f] != 'a':
1053 1053 self.ui.warn(_("%s not added!\n") % f)
1054 1054 else:
1055 1055 self.dirstate.forget(f)
1056 1056 finally:
1057 1057 del wlock
1058 1058
1059 1059 def remove(self, list, unlink=False):
1060 1060 wlock = None
1061 1061 try:
1062 1062 if unlink:
1063 1063 for f in list:
1064 1064 try:
1065 1065 util.unlink(self.wjoin(f))
1066 1066 except OSError, inst:
1067 1067 if inst.errno != errno.ENOENT:
1068 1068 raise
1069 1069 wlock = self.wlock()
1070 1070 for f in list:
1071 1071 if unlink and os.path.exists(self.wjoin(f)):
1072 1072 self.ui.warn(_("%s still exists!\n") % f)
1073 1073 elif self.dirstate[f] == 'a':
1074 1074 self.dirstate.forget(f)
1075 1075 elif f not in self.dirstate:
1076 1076 self.ui.warn(_("%s not tracked!\n") % f)
1077 1077 else:
1078 1078 self.dirstate.remove(f)
1079 1079 finally:
1080 1080 del wlock
1081 1081
1082 1082 def undelete(self, list):
1083 1083 wlock = None
1084 1084 try:
1085 1085 manifests = [self.manifest.read(self.changelog.read(p)[0])
1086 1086 for p in self.dirstate.parents() if p != nullid]
1087 1087 wlock = self.wlock()
1088 1088 for f in list:
1089 1089 if self.dirstate[f] != 'r':
1090 1090 self.ui.warn("%s not removed!\n" % f)
1091 1091 else:
1092 1092 m = f in manifests[0] and manifests[0] or manifests[1]
1093 1093 t = self.file(f).read(m[f])
1094 1094 self.wwrite(f, t, m.flags(f))
1095 1095 self.dirstate.normal(f)
1096 1096 finally:
1097 1097 del wlock
1098 1098
1099 1099 def copy(self, source, dest):
1100 1100 wlock = None
1101 1101 try:
1102 1102 p = self.wjoin(dest)
1103 1103 if not (os.path.exists(p) or os.path.islink(p)):
1104 1104 self.ui.warn(_("%s does not exist!\n") % dest)
1105 1105 elif not (os.path.isfile(p) or os.path.islink(p)):
1106 1106 self.ui.warn(_("copy failed: %s is not a file or a "
1107 1107 "symbolic link\n") % dest)
1108 1108 else:
1109 1109 wlock = self.wlock()
1110 1110 if dest not in self.dirstate:
1111 1111 self.dirstate.add(dest)
1112 1112 self.dirstate.copy(source, dest)
1113 1113 finally:
1114 1114 del wlock
1115 1115
1116 1116 def heads(self, start=None):
1117 1117 heads = self.changelog.heads(start)
1118 1118 # sort the output in rev descending order
1119 1119 heads = [(-self.changelog.rev(h), h) for h in heads]
1120 1120 heads.sort()
1121 1121 return [n for (r, n) in heads]
1122 1122
    def branchheads(self, branch, start=None):
        """Return the head nodes of the named branch, tip first.

        If start is given, only heads reachable from start are kept.
        An unknown branch name yields an empty list.
        """
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1168 1168
1169 1169 def branches(self, nodes):
1170 1170 if not nodes:
1171 1171 nodes = [self.changelog.tip()]
1172 1172 b = []
1173 1173 for n in nodes:
1174 1174 t = n
1175 1175 while 1:
1176 1176 p = self.changelog.parents(n)
1177 1177 if p[1] != nullid or p[0] == nullid:
1178 1178 b.append((t, n, p[0], p[1]))
1179 1179 break
1180 1180 n = p[0]
1181 1181 return b
1182 1182
1183 1183 def between(self, pairs):
1184 1184 r = []
1185 1185
1186 1186 for top, bottom in pairs:
1187 1187 n, l, i = top, [], 0
1188 1188 f = 1
1189 1189
1190 1190 while n != bottom:
1191 1191 p = self.changelog.parents(n)[0]
1192 1192 if i == f:
1193 1193 l.append(n)
1194 1194 f = f * 2
1195 1195 n = p
1196 1196 i += 1
1197 1197
1198 1198 r.append(l)
1199 1199
1200 1200 return r
1201 1201
1202 1202 def findincoming(self, remote, base=None, heads=None, force=False):
1203 1203 """Return list of roots of the subsets of missing nodes from remote
1204 1204
1205 1205 If base dict is specified, assume that these nodes and their parents
1206 1206 exist on the remote side and that no child of a node of base exists
1207 1207 in both remote and self.
1208 1208 Furthermore base will be updated to include the nodes that exists
1209 1209 in self and remote but no children exists in self and remote.
1210 1210 If a list of heads is specified, return only nodes which are heads
1211 1211 or ancestors of these heads.
1212 1212
1213 1213 All the ancestors of base are in self and in remote.
1214 1214 All the descendants of the list returned are missing in self.
1215 1215 (and so we know that the rest of the nodes are missing in remote, see
1216 1216 outgoing)
1217 1217 """
1218 1218 m = self.changelog.nodemap
1219 1219 search = []
1220 1220 fetch = {}
1221 1221 seen = {}
1222 1222 seenbranch = {}
1223 1223 if base == None:
1224 1224 base = {}
1225 1225
1226 1226 if not heads:
1227 1227 heads = remote.heads()
1228 1228
1229 1229 if self.changelog.tip() == nullid:
1230 1230 base[nullid] = 1
1231 1231 if heads != [nullid]:
1232 1232 return [nullid]
1233 1233 return []
1234 1234
1235 1235 # assume we're closer to the tip than the root
1236 1236 # and start by examining the heads
1237 1237 self.ui.status(_("searching for changes\n"))
1238 1238
1239 1239 unknown = []
1240 1240 for h in heads:
1241 1241 if h not in m:
1242 1242 unknown.append(h)
1243 1243 else:
1244 1244 base[h] = 1
1245 1245
1246 1246 if not unknown:
1247 1247 return []
1248 1248
1249 1249 req = dict.fromkeys(unknown)
1250 1250 reqcnt = 0
1251 1251
1252 1252 # search through remote branches
1253 1253 # a 'branch' here is a linear segment of history, with four parts:
1254 1254 # head, root, first parent, second parent
1255 1255 # (a branch always has two parents (or none) by definition)
1256 1256 unknown = remote.branches(unknown)
1257 1257 while unknown:
1258 1258 r = []
1259 1259 while unknown:
1260 1260 n = unknown.pop(0)
1261 1261 if n[0] in seen:
1262 1262 continue
1263 1263
1264 1264 self.ui.debug(_("examining %s:%s\n")
1265 1265 % (short(n[0]), short(n[1])))
1266 1266 if n[0] == nullid: # found the end of the branch
1267 1267 pass
1268 1268 elif n in seenbranch:
1269 1269 self.ui.debug(_("branch already found\n"))
1270 1270 continue
1271 1271 elif n[1] and n[1] in m: # do we know the base?
1272 1272 self.ui.debug(_("found incomplete branch %s:%s\n")
1273 1273 % (short(n[0]), short(n[1])))
1274 1274 search.append(n) # schedule branch range for scanning
1275 1275 seenbranch[n] = 1
1276 1276 else:
1277 1277 if n[1] not in seen and n[1] not in fetch:
1278 1278 if n[2] in m and n[3] in m:
1279 1279 self.ui.debug(_("found new changeset %s\n") %
1280 1280 short(n[1]))
1281 1281 fetch[n[1]] = 1 # earliest unknown
1282 1282 for p in n[2:4]:
1283 1283 if p in m:
1284 1284 base[p] = 1 # latest known
1285 1285
1286 1286 for p in n[2:4]:
1287 1287 if p not in req and p not in m:
1288 1288 r.append(p)
1289 1289 req[p] = 1
1290 1290 seen[n[0]] = 1
1291 1291
1292 1292 if r:
1293 1293 reqcnt += 1
1294 1294 self.ui.debug(_("request %d: %s\n") %
1295 1295 (reqcnt, " ".join(map(short, r))))
1296 1296 for p in xrange(0, len(r), 10):
1297 1297 for b in remote.branches(r[p:p+10]):
1298 1298 self.ui.debug(_("received %s:%s\n") %
1299 1299 (short(b[0]), short(b[1])))
1300 1300 unknown.append(b)
1301 1301
1302 1302 # do binary search on the branches we found
1303 1303 while search:
1304 1304 n = search.pop(0)
1305 1305 reqcnt += 1
1306 1306 l = remote.between([(n[0], n[1])])[0]
1307 1307 l.append(n[1])
1308 1308 p = n[0]
1309 1309 f = 1
1310 1310 for i in l:
1311 1311 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1312 1312 if i in m:
1313 1313 if f <= 2:
1314 1314 self.ui.debug(_("found new branch changeset %s\n") %
1315 1315 short(p))
1316 1316 fetch[p] = 1
1317 1317 base[i] = 1
1318 1318 else:
1319 1319 self.ui.debug(_("narrowed branch search to %s:%s\n")
1320 1320 % (short(p), short(i)))
1321 1321 search.append((p, i))
1322 1322 break
1323 1323 p, f = i, f * 2
1324 1324
1325 1325 # sanity check our fetch list
1326 1326 for f in fetch.keys():
1327 1327 if f in m:
1328 1328 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1329 1329
1330 1330 if base.keys() == [nullid]:
1331 1331 if force:
1332 1332 self.ui.warn(_("warning: repository is unrelated\n"))
1333 1333 else:
1334 1334 raise util.Abort(_("repository is unrelated"))
1335 1335
1336 1336 self.ui.debug(_("found new changesets starting at ") +
1337 1337 " ".join([short(f) for f in fetch]) + "\n")
1338 1338
1339 1339 self.ui.debug(_("%d total queries\n") % reqcnt)
1340 1340
1341 1341 return fetch.keys()
1342 1342
1343 1343 def findoutgoing(self, remote, base=None, heads=None, force=False):
1344 1344 """Return list of nodes that are roots of subsets not in remote
1345 1345
1346 1346 If base dict is specified, assume that these nodes and their parents
1347 1347 exist on the remote side.
1348 1348 If a list of heads is specified, return only nodes which are heads
1349 1349 or ancestors of these heads, and return a second element which
1350 1350 contains all remote heads which get new children.
1351 1351 """
1352 1352 if base == None:
1353 1353 base = {}
1354 1354 self.findincoming(remote, base, heads, force=force)
1355 1355
1356 1356 self.ui.debug(_("common changesets up to ")
1357 1357 + " ".join(map(short, base.keys())) + "\n")
1358 1358
1359 1359 remain = dict.fromkeys(self.changelog.nodemap)
1360 1360
1361 1361 # prune everything remote has from the tree
1362 1362 del remain[nullid]
1363 1363 remove = base.keys()
1364 1364 while remove:
1365 1365 n = remove.pop(0)
1366 1366 if n in remain:
1367 1367 del remain[n]
1368 1368 for p in self.changelog.parents(n):
1369 1369 remove.append(p)
1370 1370
1371 1371 # find every node whose parents have been pruned
1372 1372 subset = []
1373 1373 # find every remote head that will get new children
1374 1374 updated_heads = {}
1375 1375 for n in remain:
1376 1376 p1, p2 = self.changelog.parents(n)
1377 1377 if p1 not in remain and p2 not in remain:
1378 1378 subset.append(n)
1379 1379 if heads:
1380 1380 if p1 in heads:
1381 1381 updated_heads[p1] = True
1382 1382 if p2 in heads:
1383 1383 updated_heads[p2] = True
1384 1384
1385 1385 # this is the set of all roots we have to push
1386 1386 if heads:
1387 1387 return subset, updated_heads.keys()
1388 1388 else:
1389 1389 return subset
1390 1390
1391 1391 def pull(self, remote, heads=None, force=False):
1392 1392 lock = self.lock()
1393 1393 try:
1394 1394 fetch = self.findincoming(remote, heads=heads, force=force)
1395 1395 if fetch == [nullid]:
1396 1396 self.ui.status(_("requesting all changes\n"))
1397 1397
1398 1398 if not fetch:
1399 1399 self.ui.status(_("no changes found\n"))
1400 1400 return 0
1401 1401
1402 1402 if heads is None:
1403 1403 cg = remote.changegroup(fetch, 'pull')
1404 1404 else:
1405 1405 if 'changegroupsubset' not in remote.capabilities:
1406 1406 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1407 1407 cg = remote.changegroupsubset(fetch, heads, 'pull')
1408 1408 return self.addchangegroup(cg, 'pull', remote.url())
1409 1409 finally:
1410 1410 del lock
1411 1411
1412 1412 def push(self, remote, force=False, revs=None):
1413 1413 # there are two ways to push to remote repo:
1414 1414 #
1415 1415 # addchangegroup assumes local user can lock remote
1416 1416 # repo (local filesystem, old ssh servers).
1417 1417 #
1418 1418 # unbundle assumes local user cannot lock remote repo (new ssh
1419 1419 # servers, http servers).
1420 1420
1421 1421 if remote.capable('unbundle'):
1422 1422 return self.push_unbundle(remote, force, revs)
1423 1423 return self.push_addchangegroup(remote, force, revs)
1424 1424
    def prepush(self, remote, force, revs):
        """Work out what to push and build the changegroup for it.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, 1) when there is nothing to send or the push
        would create new remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # populates 'base' with nodes common to both sides
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty, nothing can become a *new* head
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r: it stays a head
                            newheads.append(r)
                    else:
                        # unknown remote head: it will remain a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1480 1480
1481 1481 def push_addchangegroup(self, remote, force, revs):
1482 1482 lock = remote.lock()
1483 1483 try:
1484 1484 ret = self.prepush(remote, force, revs)
1485 1485 if ret[0] is not None:
1486 1486 cg, remote_heads = ret
1487 1487 return remote.addchangegroup(cg, 'push', self.url())
1488 1488 return ret[1]
1489 1489 finally:
1490 1490 del lock
1491 1491
1492 1492 def push_unbundle(self, remote, force, revs):
1493 1493 # local repo finds heads on server, finds out what revs it
1494 1494 # must push. once revs transferred, if server finds it has
1495 1495 # different heads (someone else won commit/push race), server
1496 1496 # aborts.
1497 1497
1498 1498 ret = self.prepush(remote, force, revs)
1499 1499 if ret[0] is not None:
1500 1500 cg, remote_heads = ret
1501 1501 if force: remote_heads = ['force']
1502 1502 return remote.unbundle(cg, remote_heads, 'push')
1503 1503 return ret[1]
1504 1504
1505 1505 def changegroupinfo(self, nodes, source):
1506 1506 if self.ui.verbose or source == 'bundle':
1507 1507 self.ui.status(_("%d changesets found\n") % len(nodes))
1508 1508 if self.ui.debugflag:
1509 1509 self.ui.debug(_("List of changesets:\n"))
1510 1510 for node in nodes:
1511 1511 self.ui.debug("%s\n" % hex(node))
1512 1512
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer wrapping the generated stream."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1784 1784
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the generated stream."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset descending from any base node goes out
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revision numbers being sent; used to
        # decide which manifest/file revisions belong in the group
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # a changeset 'owns' itself, so its lookup function is identity
        def identity(x):
            return x

        # yield the nodes of a revlog whose linked changeset is outgoing
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # closure that records, for each outgoing changeset, which files
        # it touched (read from the changelog entry)
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # closure mapping a manifest/file node to its owning changenode
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # each file group is prefixed by the file's name
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal that no more groups follow
            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1854 1854
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source  - chunk source (stream) as consumed by changegroup.chunkiter
        srctype - origin tag ('push', 'pull', ...) forwarded to the hooks
        url     - origin URL forwarded to the hooks
        emptyok - when True, do not abort on an empty changelog group

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # Progress/debug callback: announce each incoming changeset and
            # return the revision number it will be assigned.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Map a changelog node to its local revision number (used as the
            # linkrev for incoming manifest/file revisions).
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # weakref proxy so the transaction's destructor can run even
            # while the revlogs below still hold a reference to it.
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # Each file section starts with a chunk holding the
                # filename; an empty chunk terminates the stream.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, heads))

            if changesets > 0:
                # cor+1 is the first newly-added revision; hooks may still
                # veto the whole transaction at this point.
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # Drop our reference so an uncommitted transaction is rolled
            # back by its destructor.
            del tr

        if changesets > 0:
            # Post-commit notification hooks: one 'changegroup' for the
            # batch, then one 'incoming' per added changeset.
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1954 1954
1955 1955
1956 1956 def stream_in(self, remote):
1957 1957 fp = remote.stream_out()
1958 1958 l = fp.readline()
1959 1959 try:
1960 1960 resp = int(l)
1961 1961 except ValueError:
1962 1962 raise util.UnexpectedOutput(
1963 1963 _('Unexpected response from remote server:'), l)
1964 1964 if resp == 1:
1965 1965 raise util.Abort(_('operation forbidden by server'))
1966 1966 elif resp == 2:
1967 1967 raise util.Abort(_('locking the remote repository failed'))
1968 1968 elif resp != 0:
1969 1969 raise util.Abort(_('the server sent an unknown error code'))
1970 1970 self.ui.status(_('streaming all changes\n'))
1971 1971 l = fp.readline()
1972 1972 try:
1973 1973 total_files, total_bytes = map(int, l.split(' ', 1))
1974 1974 except ValueError, TypeError:
1975 1975 raise util.UnexpectedOutput(
1976 1976 _('Unexpected response from remote server:'), l)
1977 1977 self.ui.status(_('%d files to transfer, %s of data\n') %
1978 1978 (total_files, util.bytecount(total_bytes)))
1979 1979 start = time.time()
1980 1980 for i in xrange(total_files):
1981 1981 # XXX doesn't support '\n' or '\r' in filenames
1982 1982 l = fp.readline()
1983 1983 try:
1984 1984 name, size = l.split('\0', 1)
1985 1985 size = int(size)
1986 1986 except ValueError, TypeError:
1987 1987 raise util.UnexpectedOutput(
1988 1988 _('Unexpected response from remote server:'), l)
1989 1989 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1990 1990 ofp = self.sopener(name, 'w')
1991 1991 for chunk in util.filechunkiter(fp, limit=size):
1992 1992 ofp.write(chunk)
1993 1993 ofp.close()
1994 1994 elapsed = time.time() - start
1995 1995 if elapsed <= 0:
1996 1996 elapsed = 0.001
1997 1997 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1998 1998 (util.bytecount(total_bytes), elapsed,
1999 1999 util.bytecount(total_bytes / elapsed)))
2000 2000 self.invalidate()
2001 2001 return len(self.heads()) + 1
2002 2002
2003 2003 def clone(self, remote, heads=[], stream=False):
2004 2004 '''clone remote repository.
2005 2005
2006 2006 keyword arguments:
2007 2007 heads: list of revs to clone (forces use of pull)
2008 2008 stream: use streaming clone if possible'''
2009 2009
2010 2010 # now, all clients that can request uncompressed clones can
2011 2011 # read repo formats supported by all servers that can serve
2012 2012 # them.
2013 2013
2014 2014 # if revlog format changes, client will have to check version
2015 2015 # and format flags on "stream" capability, and use
2016 2016 # uncompressed only if compatible.
2017 2017
2018 2018 if stream and not heads and remote.capable('stream'):
2019 2019 return self.stream_in(remote)
2020 2020 return self.pull(remote, heads)
2021 2021
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames every (src, dest) pair in *files*.

    The pairs are snapshotted as tuples up front, so later mutation of
    *files* (or its elements) does not change what the callback renames.
    """
    pending = [tuple(pair) for pair in files]
    def a():
        for source, target in pending:
            util.rename(source, target)
    return a
2029 2029
def instance(ui, path, create):
    """Open (create=True: initialize) the local repository at *path*.

    A leading 'file:' scheme is stripped from *path* before use.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2032 2032
def islocal(path):
    """Report whether this repository type is local (always True here);
    *path* is accepted for interface compatibility and ignored."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now