##// END OF EJS Templates
Export extra in _tag so convert can set the branch of a tag
Brendan Cully -
r4864:fc389dcc default
parent child Browse files
Show More
@@ -1,1929 +1,1931
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util, extensions, hook
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
    def __del__(self):
        # Drop the reference to any pending transaction when the repository
        # object is garbage-collected.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, if create is true, initialize) the repository at path.

        parentui -- ui object the repository-local ui is derived from
        path     -- working directory root; '.hg' is appended for metadata
        create   -- when true, create a new repository; raises RepoError if
                    one already exists, or if none exists and create is false
        """
        repo.repository.__init__(self)
        self.path = path
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        # opener/wopener give read/write access rooted at .hg / the work dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no 'requires' file: old-style repository with no extras
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            # store layout keeps revlogs under .hg/store with encoded names
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing .hg/hgrc is fine
            pass

        # lazily-populated caches (see tags(), branchtags(), nodetags())
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None
88 88
    def __getattr__(self, name):
        # Lazily construct the expensive changelog/manifest/dirstate
        # attributes on first access; once assigned, __getattr__ is no
        # longer consulted for them.
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # revlog version of the changelog dictates the store's default
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch the changelog first so defversion is set before the
            # manifest revlog is opened
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
103 103
104 104 def url(self):
105 105 return 'file:' + self.root
106 106
    def hook(self, name, throw=False, **args):
        # Delegate to the hook module with this repo's ui; 'throw' makes a
        # failing hook raise instead of returning a status.
        return hook.hook(self.ui, self, name, throw, **args)
109 109
110 110 tag_disallowed = ':\r\n'
111 111
112 def _tag(self, name, node, message, local, user, date, parent=None):
112 def _tag(self, name, node, message, local, user, date, parent=None,
113 extra={}):
113 114 use_dirstate = parent is None
114 115
115 116 for c in self.tag_disallowed:
116 117 if c in name:
117 118 raise util.Abort(_('%r cannot be used in a tag name') % c)
118 119
119 120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120 121
121 122 if local:
122 123 # local tags are stored in the current charset
123 124 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
124 125 self.hook('tag', node=hex(node), tag=name, local=local)
125 126 return
126 127
127 128 # committed tags are stored in UTF-8
128 129 line = '%s %s\n' % (hex(node), util.fromlocal(name))
129 130 if use_dirstate:
130 131 self.wfile('.hgtags', 'ab').write(line)
131 132 else:
132 133 ntags = self.filectx('.hgtags', parent).data()
133 134 self.wfile('.hgtags', 'ab').write(ntags + line)
134 135 if use_dirstate and self.dirstate.state('.hgtags') == '?':
135 136 self.add(['.hgtags'])
136 137
137 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
138 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
139 extra=extra)
138 140
139 141 self.hook('tag', node=hex(node), tag=name, local=local)
140 142
141 143 return tagnode
142 144
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to tag if .hgtags has uncommitted modifications anywhere in
        # the status lists (modified/added/removed/deleted/unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)
168 170
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        # tag name -> (node, list of superceded nodes), merged across heads
        globaltags = {}

        def readtags(lines, fn):
            # parse one .hgtags (or localtags) file and merge into globaltags
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # within one file, a later entry for the same tag supercedes
                # the earlier one; keep the history in h
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            pass

        # flatten into the cache, dropping tags deleted by mapping to nullid
        self.tagscache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
247 249
    def _hgtagsnodes(self):
        # Return [(rev, node, .hgtags filenode)] for each head that has an
        # .hgtags, oldest first, keeping only the newest head per distinct
        # .hgtags filenode so each tags file is read once.
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same tags file already seen on an older head: drop that one
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
265 267
266 268 def tagslist(self):
267 269 '''return a list of tags ordered by revision'''
268 270 l = []
269 271 for t, n in self.tags().items():
270 272 try:
271 273 r = self.changelog.rev(n)
272 274 except:
273 275 r = -2 # sort to the beginning of the list if unknown
274 276 l.append((r, t, n))
275 277 l.sort()
276 278 return [(t, n) for r, t, n in l]
277 279
278 280 def nodetags(self, node):
279 281 '''return the tags associated with a node'''
280 282 if not self.nodetagscache:
281 283 self.nodetagscache = {}
282 284 for t, n in self.tags().items():
283 285 self.nodetagscache.setdefault(n, []).append(t)
284 286 return self.nodetagscache.get(node, [])
285 287
    def _branchtags(self):
        # Return the branch -> tip-node map, bringing the on-disk branch
        # cache up to date with the current tip first.
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # scan the revisions added since the cache was written
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
295 297
    def branchtags(self):
        """Return a branch name -> tip node mapping (cached)."""
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
308 310
    def _readbranchcache(self):
        # Read .hg/branch.cache; returns (branch->node map, cached tip node,
        # cached tip rev).  Any problem invalidates the cache and returns
        # empty results rather than failing.
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was valid for
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            # never swallow user interrupts
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
336 338
    def _writebranchcache(self, branches, tip, tiprev):
        # Best-effort write of the branch cache; I/O failures are ignored
        # since the cache can always be rebuilt.
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            # header line: tip node and rev the cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
346 348
347 349 def _updatebranchcache(self, partial, start, end):
348 350 for r in xrange(start, end):
349 351 c = self.changectx(r)
350 352 b = c.branch()
351 353 partial[b] = c.node()
352 354
353 355 def lookup(self, key):
354 356 if key == '.':
355 357 key, second = self.dirstate.parents()
356 358 if key == nullid:
357 359 raise repo.RepoError(_("no revision checked out"))
358 360 if second != nullid:
359 361 self.ui.warn(_("warning: working directory has two parents, "
360 362 "tag '.' uses the first\n"))
361 363 elif key == 'null':
362 364 return nullid
363 365 n = self.changelog._match(key)
364 366 if n:
365 367 return n
366 368 if key in self.tags():
367 369 return self.tags()[key]
368 370 if key in self.branchtags():
369 371 return self.branchtags()[key]
370 372 n = self.changelog._partialmatch(key)
371 373 if n:
372 374 return n
373 375 raise repo.RepoError(_("unknown revision '%s'") % key)
374 376
    def dev(self):
        # Device number of the .hg directory (used to detect hardlink safety).
        return os.lstat(self.path).st_dev
377 379
    def local(self):
        # This is a local (filesystem) repository, unlike remote peers.
        return True
380 382
    def join(self, f):
        # Join f to the .hg metadata directory.
        return os.path.join(self.path, f)
383 385
384 386 def sjoin(self, f):
385 387 f = self.encodefn(f)
386 388 return os.path.join(self.spath, f)
387 389
    def wjoin(self, f):
        # Join f to the working directory root.
        return os.path.join(self.root, f)
390 392
    def file(self, f):
        """Return the filelog for tracked file f."""
        # manifest paths never carry a leading slash
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
395 397
    def changectx(self, changeid=None):
        # Context object for a changeset (None means the tip/default).
        return context.changectx(self, changeid)
398 400
    def workingctx(self):
        # Context object for the working directory state.
        return context.workingctx(self)
401 403
402 404 def parents(self, changeid=None):
403 405 '''
404 406 get list of changectxs for parents of changeid or working directory
405 407 '''
406 408 if changeid is None:
407 409 pl = self.dirstate.parents()
408 410 else:
409 411 n = self.changelog.lookup(changeid)
410 412 pl = self.changelog.parents(n)
411 413 if pl[1] == nullid:
412 414 return [self.changectx(pl[0])]
413 415 return [self.changectx(pl[0]), self.changectx(pl[1])]
414 416
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
419 421
    def getcwd(self):
        # Current working directory relative to the repo root.
        return self.dirstate.getcwd()
422 424
    def pathto(self, f, cwd=None):
        # Translate repo-relative path f for display relative to cwd.
        return self.dirstate.pathto(f, cwd)
425 427
    def wfile(self, f, mode='r'):
        # Open file f relative to the working directory.
        return self.wopener(f, mode)
428 430
    def _link(self, f):
        # Is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
431 433
    def _filter(self, filter, filename, data):
        """Run data through the first configured [encode]/[decode] command
        whose pattern matches filename; return it unchanged otherwise."""
        if filter not in self.filterpats:
            # compile and cache the (matcher, command) list for this filter
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching pattern applies
                break

        return data
447 449
448 450 def wread(self, filename):
449 451 if self._link(filename):
450 452 data = os.readlink(self.wjoin(filename))
451 453 else:
452 454 data = self.wopener(filename, 'r').read()
453 455 return self._filter("encode", filename, data)
454 456
    def wwrite(self, filename, data, flags):
        """Write data to working-directory file filename after 'decode'
        filtering; 'l' in flags creates a symlink, 'x' sets the exec bit."""
        data = self._filter("decode", filename, data)
        if "l" in flags:
            f = self.wjoin(filename)
            try:
                # remove any existing file so the symlink can be created
                os.unlink(f)
            except OSError:
                pass
            d = os.path.dirname(f)
            if not os.path.exists(d):
                os.makedirs(d)
            os.symlink(data, f)
        else:
            try:
                # replace a stale symlink with a regular file
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
475 477
    def wwritedata(self, filename, data):
        # Apply 'decode' filters only, without touching the filesystem.
        return self._filter("decode", filename, data)
478 480
479 481 def transaction(self):
480 482 tr = self.transhandle
481 483 if tr != None and tr.running():
482 484 return tr.nest()
483 485
484 486 # save dirstate for rollback
485 487 try:
486 488 ds = self.opener("dirstate").read()
487 489 except IOError:
488 490 ds = ""
489 491 self.opener("journal.dirstate", "w").write(ds)
490 492
491 493 renames = [(self.sjoin("journal"), self.sjoin("undo")),
492 494 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
493 495 tr = transaction.transaction(self.ui.warn, self.sopener,
494 496 self.sjoin("journal"),
495 497 aftertrans(renames))
496 498 self.transhandle = tr
497 499 return tr
498 500
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True if a journal was found and rolled back, else False.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # drop cached changelog/manifest so they are re-read from disk
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
509 511
    def rollback(self, wlock=None, lock=None):
        """Undo the last committed transaction using the undo files,
        restoring the saved dirstate as well."""
        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # force cached state to be re-read after the store changed
            self.invalidate()
            self.dirstate.invalidate()
        else:
            self.ui.warn(_("no rollback information available\n"))
523 525
    def invalidate(self):
        """Drop cached store state so it is reloaded lazily on next access."""
        for a in "changelog manifest".split():
            if hasattr(self, a):
                # deleting the attribute re-arms __getattr__'s lazy loading
                self.__delattr__(a)
        self.tagscache = None
        self.nodetagscache = None
        # NOTE(review): self.branchcache is not reset here — looks
        # intentional for the callers in this file, but verify it cannot
        # go stale after a rollback.
530 532
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire lock file 'lockname'.

        wait      -- if false, raise LockHeld immediately when contended;
                     otherwise warn and retry with a timeout
        releasefn -- callback run when the lock is released
        acquirefn -- callback run once the lock is acquired
        desc      -- human-readable description used in messages
        """
        try:
            # first attempt: non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
546 548
    def lock(self, wait=1):
        # Store lock; invalidates cached store state on acquisition.
        return self.do_lock(self.sjoin("lock"), wait,
                            acquirefn=self.invalidate,
                            desc=_('repository %s') % self.origroot)
551 553
    def wlock(self, wait=1):
        # Working-directory lock; writes the dirstate on release and
        # invalidates it on acquisition.
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.dirstate.invalidate,
                            desc=_('working directory of %s') % self.origroot)
556 558
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn         -- file name to commit
        manifest1  -- manifest of the first parent
        manifest2  -- manifest of the second parent (empty if not a merge)
        linkrev    -- changelog revision this file revision will link to
        transaction-- active transaction
        changelist -- list of changed files; fn is appended if it changed

        Returns the new (or reused) filelog node for fn.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
616 618
617 619 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
618 620 if p1 is None:
619 621 p1, p2 = self.dirstate.parents()
620 622 return self.commit(files=files, text=text, user=user, date=date,
621 623 p1=p1, p2=p2, wlock=wlock, extra=extra)
622 624
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node (or None if nothing
        was committed or the commit message was left empty).

        files        -- explicit file list; None means use status(match)
        text         -- commit message; an editor is invoked if empty or
                        force_editor is true
        p1, p2       -- explicit parents (rawcommit mode); when p1 is None
                        the dirstate parents are used
        extra        -- extra changeset metadata (copied, so the shared
                        default dict is never mutated)
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is legal UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            if (not commit and not remove and not force and p2 == nullid
                and branchname == oldname):
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                new_exec = is_exec(f)
                new_link = is_link(f)
                if not changed or changed[-1] != f:
                    # mention the file in the changelog if some flag changed,
                    # even if there was no content change.
                    old_exec = m1.execf(f)
                    old_link = m1.linkf(f)
                    if old_exec != new_exec or old_link != new_link:
                        changed.append(f)
                m1.set(f, new_exec, new_link)
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a vanished file is treated as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
            elif f in m2:
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message; an effectively-empty one aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
782 784
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # deleting during iteration is safe only because we
                        # break out of the loop immediately afterwards
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was not found in the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
824 826
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        ignored/clean are only populated when the matching flag is set.
        """

        def fcmp(fn, getnode):
            # compare working-dir contents of fn against its stored revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of 'node' restricted to files accepted by 'match'
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        mywlock = False

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            if list_clean:
                                clean.append(f)
                            # opportunistically mark the file clean in the
                            # dirstate if we can take the wlock cheaply
                            if not wlock and not mywlock:
                                mywlock = True
                                try:
                                    wlock = self.wlock(wait=0)
                                except lock.LockException:
                                    pass
                            if wlock:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

            if mywlock and wlock:
                wlock.release()
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever remains in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
934 936
def add(self, list, wlock=None):
    """Schedule the files in 'list' for addition at the next commit.

    Files that do not exist, are not regular files/symlinks, or are
    already tracked only produce a warning.  Acquires the working dir
    lock if the caller did not pass one.
    """
    if not wlock:
        wlock = self.wlock()
    for f in list:
        p = self.wjoin(f)
        try:
            st = os.lstat(p)
        except OSError:
            # was a bare 'except:'; only a stat failure is expected here,
            # and a bare except would also swallow KeyboardInterrupt
            self.ui.warn(_("%s does not exist!\n") % f)
            continue
        if st.st_size > 10000000:
            self.ui.warn(_("%s: files over 10MB may cause memory and"
                           " performance problems\n"
                           "(use 'hg revert %s' to unadd the file)\n")
                           % (f, f))
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self.ui.warn(_("%s not added: only files and symlinks "
                           "supported currently\n") % f)
        elif self.dirstate.state(f) in 'an':
            self.ui.warn(_("%s already tracked!\n") % f)
        else:
            # mark as added in the dirstate
            self.dirstate.update([f], "a")
957 959
def forget(self, list, wlock=None):
    """Undo a pending add: stop tracking files scheduled for addition.

    Files whose dirstate entry is not 'a' (added) or 'i' only get a
    warning.  Acquires the working dir lock if none was supplied.
    """
    if not wlock:
        wlock = self.wlock()
    for fname in list:
        if self.dirstate.state(fname) in 'ai':
            self.dirstate.forget([fname])
        else:
            self.ui.warn(_("%s not added!\n") % fname)
966 968
967 969 def remove(self, list, unlink=False, wlock=None):
968 970 if unlink:
969 971 for f in list:
970 972 try:
971 973 util.unlink(self.wjoin(f))
972 974 except OSError, inst:
973 975 if inst.errno != errno.ENOENT:
974 976 raise
975 977 if not wlock:
976 978 wlock = self.wlock()
977 979 for f in list:
978 980 if unlink and os.path.exists(self.wjoin(f)):
979 981 self.ui.warn(_("%s still exists!\n") % f)
980 982 elif self.dirstate.state(f) == 'a':
981 983 self.dirstate.forget([f])
982 984 elif f not in self.dirstate:
983 985 self.ui.warn(_("%s not tracked!\n") % f)
984 986 else:
985 987 self.dirstate.update([f], "r")
986 988
def undelete(self, list, wlock=None):
    """Restore files scheduled for removal ('r' state).

    The file contents are taken from the first parent's manifest and
    written back to the working directory, and the dirstate entry is
    reset to 'n' (normal).
    """
    p = self.dirstate.parents()[0]
    mn = self.changelog.read(p)[0]
    m = self.manifest.read(mn)
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) not in "r":
            # wrap message in _() for i18n, consistent with every other
            # warning in this class
            self.ui.warn(_("%s not removed!\n") % f)
        else:
            t = self.file(f).read(m[f])
            self.wwrite(f, t, m.flags(f))
            self.dirstate.update([f], "n")
1000 1002
def copy(self, source, dest, wlock=None):
    """Record in the dirstate that dest is a copy of source.

    dest must already exist in the working directory as a regular file
    or symlink; otherwise only a warning is emitted.
    """
    target = self.wjoin(dest)
    if not (os.path.exists(target) or os.path.islink(target)):
        self.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not (os.path.isfile(target) or os.path.islink(target)):
        self.ui.warn(_("copy failed: %s is not a file or a "
                       "symbolic link\n") % dest)
        return
    if not wlock:
        wlock = self.wlock()
    # an untracked destination is scheduled for addition as well
    if self.dirstate.state(dest) == '?':
        self.dirstate.update([dest], "a")
    self.dirstate.copy(source, dest)
1014 1016
def heads(self, start=None):
    """Return changelog heads, sorted by descending revision number."""
    nodes = self.changelog.heads(start)
    # decorate-sort-undecorate: negate the rev so an ascending sort
    # yields highest revision first
    decorated = [(-self.changelog.rev(n), n) for n in nodes]
    decorated.sort()
    return [node for _junk, node in decorated]
1021 1023
def branchheads(self, branch, start=None):
    """Return the head nodes of the named branch.

    If start is given, only heads reachable from start are returned
    (filtered through changelog.nodesbetween).  Returns [] for an
    unknown branch name.
    """
    branches = self.branchtags()
    if branch not in branches:
        return []
    # The basic algorithm is this:
    #
    # Start from the branch tip since there are no later revisions that can
    # possibly be in this branch, and the tip is a guaranteed head.
    #
    # Remember the tip's parents as the first ancestors, since these by
    # definition are not heads.
    #
    # Step backwards from the branch tip through all the revisions. We are
    # guaranteed by the rules of Mercurial that we will now be visiting the
    # nodes in reverse topological order (children before parents).
    #
    # If a revision is one of the ancestors of a head then we can toss it
    # out of the ancestors set (we've already found it and won't be
    # visiting it again) and put its parents in the ancestors set.
    #
    # Otherwise, if a revision is in the branch it's another head, since it
    # wasn't in the ancestor list of an existing head. So add it to the
    # head list, and add its parents to the ancestor list.
    #
    # If it is not in the branch ignore it.
    #
    # Once we have a list of heads, use nodesbetween to filter out all the
    # heads that cannot be reached from startrev. There may be a more
    # efficient way to do this as part of the previous algorithm.

    set = util.set
    # work in revision numbers; start from the recorded branch tip
    heads = [self.changelog.rev(branches[branch])]
    # Don't care if ancestors contains nullrev or not.
    ancestors = set(self.changelog.parentrevs(heads[0]))
    for rev in xrange(heads[0] - 1, nullrev, -1):
        if rev in ancestors:
            ancestors.update(self.changelog.parentrevs(rev))
            ancestors.remove(rev)
        elif self.changectx(rev).branch() == branch:
            heads.append(rev)
            ancestors.update(self.changelog.parentrevs(rev))
    # convert revision numbers back to nodes
    heads = [self.changelog.node(rev) for rev in heads]
    if start is not None:
        heads = self.changelog.nodesbetween([start], heads)[2]
    return heads
1067 1069
def branches(self, nodes):
    """For each given node, walk first parents back to a branch point.

    A branch point is a merge (second parent set) or the root (first
    parent is nullid).  Returns a list of 4-tuples
    (head, branchpoint, p1, p2).  Defaults to the changelog tip when
    no nodes are given.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for tip in nodes:
        node = tip
        while True:
            p1, p2 = self.changelog.parents(node)
            if p2 != nullid or p1 == nullid:
                result.append((tip, node, p1, p2))
                break
            node = p1
    return result
1081 1083
def between(self, pairs):
    """For each (top, bottom) pair, sample the first-parent chain.

    Walking from top towards bottom, record the nodes at exponentially
    growing step counts (1, 2, 4, ...).  Returns one list of sampled
    nodes per input pair; used for binary search during discovery.
    """
    result = []
    for top, bottom in pairs:
        marks = []
        node = top
        step = 0
        nextmark = 1
        while node != bottom:
            if step == nextmark:
                marks.append(node)
                nextmark *= 2
            node = self.changelog.parents(node)[0]
            step += 1
        result.append(marks)
    return result
1100 1102
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exist
    in self and remote but have no children in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    # nodemap supports fast 'in' tests for nodes we have locally
    m = self.changelog.nodemap
    search = []          # branch ranges queued for binary search below
    fetch = {}           # earliest-unknown nodes: roots of the missing set
    seen = {}            # branch heads already examined
    seenbranch = {}      # branch tuples already examined
    if base == None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything the remote has is missing
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        return []

    req = dict.fromkeys(unknown)  # nodes already requested from remote
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                # queue unknown parents for the next batched request
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                        (reqcnt, " ".join(map(short, r))))
            # ask the remote in batches of 10 to bound request size
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            raise repo.RepoError(_("already have changeset ") + short(f[:4]))

    if base.keys() == [nullid]:
        # only the null node in common: the repos share no history
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1241 1243
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base == None:
        # no pre-computed common set: run discovery to fill 'base'
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    # start with every local node, then prune what the remote has
    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1289 1291
def pull(self, remote, heads=None, force=False, lock=None):
    """Pull changes from remote into this repository.

    If heads is given, pull only what is needed to obtain those heads
    (requires the remote to support changegroupsubset).  Acquires the
    repo lock unless the caller supplied one; a lock acquired here is
    released on exit.  Returns the result of addchangegroup, or 0 when
    there is nothing to pull.
    """
    ownlock = not lock
    if ownlock:
        lock = self.lock()
    try:
        roots = self.findincoming(remote, force=force)
        if roots == [nullid]:
            self.ui.status(_("requesting all changes\n"))
        if not roots:
            self.ui.status(_("no changes found\n"))
            return 0
        if heads is None:
            cg = remote.changegroup(roots, 'pull')
        elif 'changegroupsubset' in remote.capabilities:
            cg = remote.changegroupsubset(roots, heads, 'pull')
        else:
            raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        if ownlock:
            lock.release()
1315 1317
def push(self, remote, force=False, revs=None):
    """Push changes to remote, choosing a transport strategy.

    unbundle is used when the remote advertises it (the local user
    cannot lock the remote repo: new ssh servers, http servers);
    otherwise addchangegroup is used (local filesystem, old ssh
    servers, where we can take the remote lock ourselves).
    """
    if not remote.capable('unbundle'):
        return self.push_addchangegroup(remote, force, revs)
    return self.push_unbundle(remote, force, revs)
1328 1330
def prepush(self, remote, force, revs):
    """Compute what a push to remote would send.

    Returns a pair: (changegroup, remote_heads) when there is
    something to push, or (None, 1) when there is nothing to push or
    the push is refused because it would create new remote heads and
    force is not set.
    """
    base = {}
    remote_heads = remote.heads()
    # 'inc' is non-empty when the remote has changes we do not
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        warn = 0

        if remote_heads == [nullid]:
            # pushing into an empty repo can never add heads
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        # r stays a head: no outgoing head descends from it
                        newheads.append(r)
                else:
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
    elif inc:
        self.ui.warn(_("note: unsynced remote changes!\n"))


    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1384 1386
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote repo and feeding it a changegroup.

    Returns the remote's addchangegroup result, or prepush's status
    code when there is nothing to push.
    """
    lock = remote.lock()
    try:
        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]
    finally:
        # previously the lock was only dropped by garbage collection;
        # release it deterministically, even if prepush raises
        lock.release()
1393 1395
def push_unbundle(self, remote, force, revs):
    """Push without taking the remote lock, via remote.unbundle().

    The local repo finds the heads on the server and computes what it
    must push.  Once the revs are transferred, if the server finds it
    has different heads (someone else won a commit/push race), it
    aborts.  Returns the remote's unbundle result, or prepush's status
    code when there is nothing to push.
    """
    cg, remote_heads = self.prepush(remote, force, revs)
    if cg is None:
        return remote_heads
    if force:
        # sentinel telling the server to skip its heads check
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1406 1408
def changegroupinfo(self, nodes):
    """Report the size of an outgoing changeset group; in debug mode,
    also list every changeset in it."""
    count = len(nodes)
    self.ui.note(_("%d changesets found\n") % count)
    if not self.ui.debugflag:
        return
    self.ui.debug(_("List of changesets:\n"))
    for n in nodes:
        self.ui.debug("%s\n" % hex(n))
1413 1415
def changegroupsubset(self, bases, heads, source):
    """This function generates a changegroup consisting of all the nodes
    that are descendents of any of the bases, and ancestors of any of
    the heads.

    bases: changenodes assumed to be present on the recipient side
    heads: changenodes to bring the recipient up to
    source: opaque tag passed to the preoutgoing/outgoing hooks

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.

    Returns a util.chunkbuffer wrapping a generator that yields the
    raw changegroup chunks.
    """

    self.hook('preoutgoing', throw=True, source=source)

    # Set up some initial variables
    # Make it easy to refer to self.changelog
    cl = self.changelog
    # msng is short for missing - compute the list of changesets in this
    # changegroup.
    msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
    self.changegroupinfo(msng_cl_lst)
    # Some bases may turn out to be superfluous, and some heads may be
    # too.  nodesbetween will return the minimal set of bases and heads
    # necessary to re-create the changegroup.

    # Known heads are the list of heads that it is assumed the recipient
    # of this changegroup will know about.
    knownheads = {}
    # We assume that all parents of bases are known heads.
    for n in bases:
        for p in cl.parents(n):
            if p != nullid:
                knownheads[p] = 1
    knownheads = knownheads.keys()
    if knownheads:
        # Now that we know what heads are known, we can compute which
        # changesets are known.  The recipient must know about all
        # changesets required to reach the known heads from the null
        # changeset.
        has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
        junk = None
        # Transform the list into an ersatz set.
        has_cl_set = dict.fromkeys(has_cl_set)
    else:
        # If there were no known heads, the recipient cannot be assumed to
        # know about any changesets.
        has_cl_set = {}

    # Make it easy to refer to self.manifest
    mnfst = self.manifest
    # We don't know which manifests are missing yet
    msng_mnfst_set = {}
    # Nor do we know which filenodes are missing.
    msng_filenode_set = {}

    junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
    junk = None

    # A changeset always belongs to itself, so the changenode lookup
    # function for a changenode is identity.
    def identity(x):
        return x

    # A function generating function.  Sets up an environment for the
    # inner function.
    def cmp_by_rev_func(revlog):
        # Compare two nodes by their revision number in the environment's
        # revision history.  Since the revision number both represents the
        # most efficient order to read the nodes in, and represents a
        # topological sorting of the nodes, this function is often useful.
        def cmp_by_rev(a, b):
            return cmp(revlog.rev(a), revlog.rev(b))
        return cmp_by_rev

    # If we determine that a particular file or manifest node must be a
    # node that the recipient of the changegroup will already have, we can
    # also assume the recipient will have all the parents.  This function
    # prunes them from the set of missing nodes.
    def prune_parents(revlog, hasset, msngset):
        haslst = hasset.keys()
        haslst.sort(cmp_by_rev_func(revlog))
        for node in haslst:
            parentlst = [p for p in revlog.parents(node) if p != nullid]
            while parentlst:
                n = parentlst.pop()
                if n not in hasset:
                    hasset[n] = 1
                    p = [p for p in revlog.parents(n) if p != nullid]
                    parentlst.extend(p)
        for n in hasset:
            msngset.pop(n, None)

    # This is a function generating function used to set up an environment
    # for the inner function to execute in.
    def manifest_and_file_collector(changedfileset):
        # This is an information gathering function that gathers
        # information from each changeset node that goes out as part of
        # the changegroup.  The information gathered is a list of which
        # manifest nodes are potentially required (the recipient may
        # already have them) and total list of all files which were
        # changed in any changeset in the changegroup.
        #
        # We also remember the first changenode we saw any manifest
        # referenced by so we can later determine which changenode 'owns'
        # the manifest.
        def collect_manifests_and_files(clnode):
            c = cl.read(clnode)
            for f in c[3]:
                # This is to make sure we only have one instance of each
                # filename string for each filename.
                changedfileset.setdefault(f, f)
            msng_mnfst_set.setdefault(c[0], clnode)
        return collect_manifests_and_files

    # Figure out which manifest nodes (of the ones we think might be part
    # of the changegroup) the recipient must know about and remove them
    # from the changegroup.
    def prune_manifests():
        has_mnfst_set = {}
        for n in msng_mnfst_set:
            # If a 'missing' manifest thinks it belongs to a changenode
            # the recipient is assumed to have, obviously the recipient
            # must have that manifest.
            linknode = cl.node(mnfst.linkrev(n))
            if linknode in has_cl_set:
                has_mnfst_set[n] = 1
        prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

    # Use the information collected in collect_manifests_and_files to say
    # which changenode any manifestnode belongs to.
    def lookup_manifest_link(mnfstnode):
        return msng_mnfst_set[mnfstnode]

    # A function generating function that sets up the initial environment
    # the inner function.
    def filenode_collector(changedfiles):
        next_rev = [0]
        # This gathers information from each manifestnode included in the
        # changegroup about which filenodes the manifest node references
        # so we can include those in the changegroup too.
        #
        # It also remembers which changenode each filenode belongs to.  It
        # does this by assuming the a filenode belongs to the changenode
        # the first manifest that references it belongs to.
        def collect_msng_filenodes(mnfstnode):
            r = mnfst.rev(mnfstnode)
            if r == next_rev[0]:
                # If the last rev we looked at was the one just previous,
                # we only need to see a diff.
                delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                # For each line in the delta
                for dline in delta.splitlines():
                    # get the filename and filenode for that line
                    f, fnode = dline.split('\0')
                    fnode = bin(fnode[:40])
                    f = changedfiles.get(f, None)
                    # And if the file is in the list of files we care
                    # about.
                    if f is not None:
                        # Get the changenode this manifest belongs to
                        clnode = msng_mnfst_set[mnfstnode]
                        # Create the set of filenodes for the file if
                        # there isn't one already.
                        ndset = msng_filenode_set.setdefault(f, {})
                        # And set the filenode's changelog node to the
                        # manifest's if it hasn't been set already.
                        ndset.setdefault(fnode, clnode)
            else:
                # Otherwise we need a full manifest.
                m = mnfst.read(mnfstnode)
                # For every file in we care about.
                for f in changedfiles:
                    fnode = m.get(f, None)
                    # If it's in the manifest
                    if fnode is not None:
                        # See comments above.
                        clnode = msng_mnfst_set[mnfstnode]
                        ndset = msng_filenode_set.setdefault(f, {})
                        ndset.setdefault(fnode, clnode)
            # Remember the revision we hope to see next.
            next_rev[0] = r + 1
        return collect_msng_filenodes

    # We have a list of filenodes we think we need for a file, lets remove
    # all those we now the recipient must have.
    def prune_filenodes(f, filerevlog):
        msngset = msng_filenode_set[f]
        hasset = {}
        # If a 'missing' filenode thinks it belongs to a changenode we
        # assume the recipient must have, then the recipient must have
        # that filenode.
        for n in msngset:
            clnode = cl.node(filerevlog.linkrev(n))
            if clnode in has_cl_set:
                hasset[n] = 1
        prune_parents(filerevlog, hasset, msngset)

    # A function generator function that sets up the a context for the
    # inner function.
    def lookup_filenode_link_func(fname):
        msngset = msng_filenode_set[fname]
        # Lookup the changenode the filenode belongs to.
        def lookup_filenode_link(fnode):
            return msngset[fnode]
        return lookup_filenode_link

    # Now that we have all theses utility functions to help out and
    # logically divide up the task, generate the group.
    def gengroup():
        # The set of changed files starts empty.
        changedfiles = {}
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        group = cl.group(msng_cl_lst, identity,
                         manifest_and_file_collector(changedfiles))
        for chnk in group:
            yield chnk

        # The list of manifests has been collected by the generator
        # calling our functions back.
        prune_manifests()
        msng_mnfst_lst = msng_mnfst_set.keys()
        # Sort the manifestnodes by revision number.
        msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                            filenode_collector(changedfiles))
        for chnk in group:
            yield chnk

        # These are no longer needed, dereference and toss the memory for
        # them.
        msng_mnfst_lst = None
        msng_mnfst_set.clear()

        changedfiles = changedfiles.keys()
        changedfiles.sort()
        # Go through all our files in order sorted by name.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            # Toss out the filenodes that the recipient isn't really
            # missing.
            if msng_filenode_set.has_key(fname):
                prune_filenodes(fname, filerevlog)
                msng_filenode_lst = msng_filenode_set[fname].keys()
            else:
                msng_filenode_lst = []
            # If any filenodes are left, generate the group for them,
            # otherwise don't bother.
            if len(msng_filenode_lst) > 0:
                yield changegroup.genchunk(fname)
                # Sort the filenodes by their revision #
                msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                # Create a group generator and only pass in a changenode
                # lookup function as we need to collect no information
                # from filenodes.
                group = filerevlog.group(msng_filenode_lst,
                                         lookup_filenode_link_func(fname))
                for chnk in group:
                    yield chnk
            if msng_filenode_set.has_key(fname):
                # Don't need this anymore, toss it to free memory.
                del msng_filenode_set[fname]
        # Signal that no more groups are left.
        yield changegroup.closechunk()

    if msng_cl_lst:
        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

    return util.chunkbuffer(gengroup())
1685 1687
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    basenodes: changenodes the recipient is assumed to have
    source: opaque tag passed to the preoutgoing/outgoing hooks

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    Returns a util.chunkbuffer wrapping a generator of raw changegroup
    chunks.
    """

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # everything between the bases and our heads is outgoing
    nodes = cl.nodesbetween(basenodes, None)[0]
    # ersatz set of the changelog revision numbers being sent
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes)

    def identity(x):
        return x

    # yield, in revision order, the nodes of a revlog whose linked
    # changeset is part of this changegroup
    def gennodelst(revlog):
        for r in xrange(0, revlog.count()):
            n = revlog.node(r)
            if revlog.linkrev(n) in revset:
                yield n

    # collect the names of all files touched by outgoing changesets
    def changed_file_collector(changedfileset):
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    # map a revlog node to its linked changelog node
    def lookuprevlink_func(revlog):
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            if nodeiter:
                yield changegroup.genchunk(fname)
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1752 1754
    def addchangegroup(self, source, srctype, url):
        """Add a changegroup read from *source* to the repo.

        source is a stream of changegroup chunks (read via
        changegroup.chunkiter/getchunk); srctype is a free-form label
        ('push', 'pull', ...) passed through to the hooks; url identifies
        where the changes came from.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1

        Raises util.Abort if the changelog or a file revlog group in the
        stream is empty.  Fires the prechangegroup, pretxnchangegroup,
        changegroup and incoming hooks.
        """
        # linkrev callback for the changelog: each new changeset links to
        # its own (next) revision number
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # linkrev callback for manifest/file revlogs: map a changelog node
        # back to its revision number
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        # cor/cnr: last changelog rev before/after the group is added,
        # so [cor+1, cnr] are the newly added changesets
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # an empty chunk (falsy f) marks the end of the file groups
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup runs before tr.close() so a failing hook
            # can still roll the whole group back
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook call per newly added changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1849 1851
1850 1852
1851 1853 def stream_in(self, remote):
1852 1854 fp = remote.stream_out()
1853 1855 l = fp.readline()
1854 1856 try:
1855 1857 resp = int(l)
1856 1858 except ValueError:
1857 1859 raise util.UnexpectedOutput(
1858 1860 _('Unexpected response from remote server:'), l)
1859 1861 if resp == 1:
1860 1862 raise util.Abort(_('operation forbidden by server'))
1861 1863 elif resp == 2:
1862 1864 raise util.Abort(_('locking the remote repository failed'))
1863 1865 elif resp != 0:
1864 1866 raise util.Abort(_('the server sent an unknown error code'))
1865 1867 self.ui.status(_('streaming all changes\n'))
1866 1868 l = fp.readline()
1867 1869 try:
1868 1870 total_files, total_bytes = map(int, l.split(' ', 1))
1869 1871 except ValueError, TypeError:
1870 1872 raise util.UnexpectedOutput(
1871 1873 _('Unexpected response from remote server:'), l)
1872 1874 self.ui.status(_('%d files to transfer, %s of data\n') %
1873 1875 (total_files, util.bytecount(total_bytes)))
1874 1876 start = time.time()
1875 1877 for i in xrange(total_files):
1876 1878 # XXX doesn't support '\n' or '\r' in filenames
1877 1879 l = fp.readline()
1878 1880 try:
1879 1881 name, size = l.split('\0', 1)
1880 1882 size = int(size)
1881 1883 except ValueError, TypeError:
1882 1884 raise util.UnexpectedOutput(
1883 1885 _('Unexpected response from remote server:'), l)
1884 1886 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1885 1887 ofp = self.sopener(name, 'w')
1886 1888 for chunk in util.filechunkiter(fp, limit=size):
1887 1889 ofp.write(chunk)
1888 1890 ofp.close()
1889 1891 elapsed = time.time() - start
1890 1892 if elapsed <= 0:
1891 1893 elapsed = 0.001
1892 1894 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1893 1895 (util.bytecount(total_bytes), elapsed,
1894 1896 util.bytecount(total_bytes / elapsed)))
1895 1897 self.invalidate()
1896 1898 return len(self.heads()) + 1
1897 1899
1898 1900 def clone(self, remote, heads=[], stream=False):
1899 1901 '''clone remote repository.
1900 1902
1901 1903 keyword arguments:
1902 1904 heads: list of revs to clone (forces use of pull)
1903 1905 stream: use streaming clone if possible'''
1904 1906
1905 1907 # now, all clients that can request uncompressed clones can
1906 1908 # read repo formats supported by all servers that can serve
1907 1909 # them.
1908 1910
1909 1911 # if revlog format changes, client will have to check version
1910 1912 # and format flags on "stream" capability, and use
1911 1913 # uncompressed only if compatible.
1912 1914
1913 1915 if stream and not heads and remote.capable('stream'):
1914 1916 return self.stream_in(remote)
1915 1917 return self.pull(remote, heads)
1916 1918
1917 1919 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the given (src, dest) renames.

    The pairs are snapshotted as tuples up front so the returned closure
    holds no reference back to the transaction that supplied them.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for source, target in pending:
            util.rename(source, target)
    return run_renames
1924 1926
def instance(ui, path, create):
    """Repository factory hook: open (or create) the local repo at *path*."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1927 1929
def islocal(path):
    """Report whether *path* names a local repository (always true here)."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now