localrepo: demand-load changeset, manifest, and dirstate
Matt Mackall
r4559:eda59019 default
@@ -1,1958 +1,1967 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.path = path
24 24 self.root = os.path.realpath(path)
25 25 self.path = os.path.join(self.root, ".hg")
26 26 self.origroot = path
27 27 self.opener = util.opener(self.path)
28 28 self.wopener = util.opener(self.root)
29 29
30 30 if not os.path.isdir(self.path):
31 31 if create:
32 32 if not os.path.exists(path):
33 33 os.mkdir(path)
34 34 os.mkdir(self.path)
35 35 requirements = ["revlogv1"]
36 36 if parentui.configbool('format', 'usestore', True):
37 37 os.mkdir(os.path.join(self.path, "store"))
38 38 requirements.append("store")
39 39 # create an invalid changelog
40 40 self.opener("00changelog.i", "a").write(
41 41 '\0\0\0\2' # represents revlogv2
42 42 ' dummy changelog to prevent using the old repo layout'
43 43 )
44 44 reqfile = self.opener("requires", "w")
45 45 for r in requirements:
46 46 reqfile.write("%s\n" % r)
47 47 reqfile.close()
48 48 else:
49 49 raise repo.RepoError(_("repository %s not found") % path)
50 50 elif create:
51 51 raise repo.RepoError(_("repository %s already exists") % path)
52 52 else:
53 53 # find requirements
54 54 try:
55 55 requirements = self.opener("requires").read().splitlines()
56 56 except IOError, inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 requirements = []
60 60 # check them
61 61 for r in requirements:
62 62 if r not in self.supported:
63 63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64 64
65 65 # setup store
66 66 if "store" in requirements:
67 67 self.encodefn = util.encodefilename
68 68 self.decodefn = util.decodefilename
69 69 self.spath = os.path.join(self.path, "store")
70 70 else:
71 71 self.encodefn = lambda x: x
72 72 self.decodefn = lambda x: x
73 73 self.spath = self.path
74 74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75 75
76 76 self.ui = ui.ui(parentui=parentui)
77 77 try:
78 78 self.ui.readconfig(self.join("hgrc"), self.root)
79 79 except IOError:
80 80 pass
81 81
82 self.changelog = changelog.changelog(self.sopener)
83 self.sopener.defversion = self.changelog.version
84 self.manifest = manifest.manifest(self.sopener)
85
86 82 fallback = self.ui.config('ui', 'fallbackencoding')
87 83 if fallback:
88 84 util._fallbackencoding = fallback
89 85
90 86 self.tagscache = None
91 87 self.branchcache = None
92 88 self.nodetagscache = None
93 89 self.filterpats = {}
94 90 self.transhandle = None
95 91
96 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
92 def __getattr__(self, name):
93 if name == 'changelog':
94 self.changelog = changelog.changelog(self.sopener)
95 self.sopener.defversion = self.changelog.version
96 return self.changelog
97 if name == 'manifest':
98 self.changelog
99 self.manifest = manifest.manifest(self.sopener)
100 return self.manifest
101 if name == 'dirstate':
102 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
103 return self.dirstate
104 else:
105 raise AttributeError, name
97 106
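The __getattr__ hook added above is what implements the demand loading: Python calls __getattr__ only when normal attribute lookup fails, so the first access builds the expensive object and caches it in the instance dict, and every later access bypasses the hook entirely. Note also that the manifest branch deliberately touches self.changelog first, since opening the changelog sets sopener.defversion before the manifest revlog is constructed. A minimal standalone sketch of the pattern (class and loader names are illustrative, not Mercurial's):

    def load_expensive_data():
        # stand-in for an expensive parse, e.g. reading a revlog from disk
        return [1, 2, 3]

    class Lazy(object):
        def __getattr__(self, name):
            # only reached when normal lookup fails; assigning the
            # attribute caches it, so this method never runs twice
            if name == 'data':
                self.data = load_expensive_data()
                return self.data
            raise AttributeError(name)

    l = Lazy()
    l.data   # first access triggers the load
    l.data   # now served from the instance dict, no reload
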
98 107 def url(self):
99 108 return 'file:' + self.root
100 109
101 110 def hook(self, name, throw=False, **args):
102 111 def callhook(hname, funcname):
103 112 '''call python hook. hook is callable object, looked up as
104 113 name in python module. if callable returns "true", hook
105 114 fails, else passes. if hook raises exception, treated as
106 115 hook failure. exception propagates if throw is "true".
107 116
108 117 reason for "true" meaning "hook failed" is so that
109 118 unmodified commands (e.g. mercurial.commands.update) can
110 119 be run as hooks without wrappers to convert return values.'''
111 120
112 121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
113 122 obj = funcname
114 123 if not callable(obj):
115 124 d = funcname.rfind('.')
116 125 if d == -1:
117 126 raise util.Abort(_('%s hook is invalid ("%s" not in '
118 127 'a module)') % (hname, funcname))
119 128 modname = funcname[:d]
120 129 try:
121 130 obj = __import__(modname)
122 131 except ImportError:
123 132 try:
124 133 # extensions are loaded with hgext_ prefix
125 134 obj = __import__("hgext_%s" % modname)
126 135 except ImportError:
127 136 raise util.Abort(_('%s hook is invalid '
128 137 '(import of "%s" failed)') %
129 138 (hname, modname))
130 139 try:
131 140 for p in funcname.split('.')[1:]:
132 141 obj = getattr(obj, p)
133 142 except AttributeError, err:
134 143 raise util.Abort(_('%s hook is invalid '
135 144 '("%s" is not defined)') %
136 145 (hname, funcname))
137 146 if not callable(obj):
138 147 raise util.Abort(_('%s hook is invalid '
139 148 '("%s" is not callable)') %
140 149 (hname, funcname))
141 150 try:
142 151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
143 152 except (KeyboardInterrupt, util.SignalInterrupt):
144 153 raise
145 154 except Exception, exc:
146 155 if isinstance(exc, util.Abort):
147 156 self.ui.warn(_('error: %s hook failed: %s\n') %
148 157 (hname, exc.args[0]))
149 158 else:
150 159 self.ui.warn(_('error: %s hook raised an exception: '
151 160 '%s\n') % (hname, exc))
152 161 if throw:
153 162 raise
154 163 self.ui.print_exc()
155 164 return True
156 165 if r:
157 166 if throw:
158 167 raise util.Abort(_('%s hook failed') % hname)
159 168 self.ui.warn(_('warning: %s hook failed\n') % hname)
160 169 return r
161 170
162 171 def runhook(name, cmd):
163 172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
164 173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
165 174 r = util.system(cmd, environ=env, cwd=self.root)
166 175 if r:
167 176 desc, r = util.explain_exit(r)
168 177 if throw:
169 178 raise util.Abort(_('%s hook %s') % (name, desc))
170 179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
171 180 return r
172 181
173 182 r = False
174 183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
175 184 if hname.split(".", 1)[0] == name and cmd]
176 185 hooks.sort()
177 186 for hname, cmd in hooks:
178 187 if callable(cmd):
179 188 r = callhook(hname, cmd) or r
180 189 elif cmd.startswith('python:'):
181 190 r = callhook(hname, cmd[7:].strip()) or r
182 191 else:
183 192 r = runhook(hname, cmd) or r
184 193 return r
185 194
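The dispatch loop above accepts three hook forms from the [hooks] section: a callable installed by an extension, a "python:" dotted path resolved by callhook(), and anything else run by runhook() as a shell command with the keyword arguments exported as HG_* environment variables. A sketch of matching hgrc entries and an in-process hook (the myhooks module and its behavior are invented for illustration):

    [hooks]
    # shell hook: the commit node arrives as $HG_NODE in the environment
    commit = echo "committed $HG_NODE"
    # in-process hook: imported and called as myhooks.check(ui=..., repo=...)
    pretxncommit.check = python:myhooks.check

    # myhooks.py - returning a true value makes the hook (and the commit) fail
    def check(ui, repo, hooktype, node=None, **kwargs):
        ui.note("checking %s\n" % node)
        return False
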
186 195 tag_disallowed = ':\r\n'
187 196
188 197 def _tag(self, name, node, message, local, user, date, parent=None):
189 198 use_dirstate = parent is None
190 199
191 200 for c in self.tag_disallowed:
192 201 if c in name:
193 202 raise util.Abort(_('%r cannot be used in a tag name') % c)
194 203
195 204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
196 205
197 206 if local:
198 207 # local tags are stored in the current charset
199 208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
200 209 self.hook('tag', node=hex(node), tag=name, local=local)
201 210 return
202 211
203 212 # committed tags are stored in UTF-8
204 213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
205 214 if use_dirstate:
206 215 self.wfile('.hgtags', 'ab').write(line)
207 216 else:
208 217 ntags = self.filectx('.hgtags', parent).data()
209 218 self.wfile('.hgtags', 'ab').write(ntags + line)
210 219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
211 220 self.add(['.hgtags'])
212 221
213 222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
214 223
215 224 self.hook('tag', node=hex(node), tag=name, local=local)
216 225
217 226 return tagnode
218 227
219 228 def tag(self, name, node, message, local, user, date):
220 229 '''tag a revision with a symbolic name.
221 230
222 231 if local is True, the tag is stored in a per-repository file.
223 232 otherwise, it is stored in the .hgtags file, and a new
224 233 changeset is committed with the change.
225 234
226 235 keyword arguments:
227 236
228 237 local: whether to store tag in non-version-controlled file
229 238 (default False)
230 239
231 240 message: commit message to use if committing
232 241
233 242 user: name of user to use if committing
234 243
235 244 date: date tuple to use if committing'''
236 245
237 246 for x in self.status()[:5]:
238 247 if '.hgtags' in x:
239 248 raise util.Abort(_('working copy of .hgtags is changed '
240 249 '(please commit .hgtags manually)'))
241 250
242 251
243 252 self._tag(name, node, message, local, user, date)
244 253
245 254 def tags(self):
246 255 '''return a mapping of tag to node'''
247 256 if self.tagscache:
248 257 return self.tagscache
249 258
250 259 globaltags = {}
251 260
252 261 def readtags(lines, fn):
253 262 filetags = {}
254 263 count = 0
255 264
256 265 def warn(msg):
257 266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
258 267
259 268 for l in lines:
260 269 count += 1
261 270 if not l:
262 271 continue
263 272 s = l.split(" ", 1)
264 273 if len(s) != 2:
265 274 warn(_("cannot parse entry"))
266 275 continue
267 276 node, key = s
268 277 key = util.tolocal(key.strip()) # stored in UTF-8
269 278 try:
270 279 bin_n = bin(node)
271 280 except TypeError:
272 281 warn(_("node '%s' is not well formed") % node)
273 282 continue
274 283 if bin_n not in self.changelog.nodemap:
275 284 warn(_("tag '%s' refers to unknown node") % key)
276 285 continue
277 286
278 287 h = []
279 288 if key in filetags:
280 289 n, h = filetags[key]
281 290 h.append(n)
282 291 filetags[key] = (bin_n, h)
283 292
284 293 for k,nh in filetags.items():
285 294 if k not in globaltags:
286 295 globaltags[k] = nh
287 296 continue
288 297 # we prefer the global tag if:
289 298 # it supersedes us OR
290 299 # mutual supersedes and it has a higher rank
291 300 # otherwise we win because we're tip-most
292 301 an, ah = nh
293 302 bn, bh = globaltags[k]
294 303 if bn != an and an in bh and \
295 304 (bn not in ah or len(bh) > len(ah)):
296 305 an = bn
297 306 ah.extend([n for n in bh if n not in ah])
298 307 globaltags[k] = an, ah
299 308
300 309 # read the tags file from each head, ending with the tip
301 310 f = None
302 311 for rev, node, fnode in self._hgtagsnodes():
303 312 f = (f and f.filectx(fnode) or
304 313 self.filectx('.hgtags', fileid=fnode))
305 314 readtags(f.data().splitlines(), f)
306 315
307 316 try:
308 317 data = util.fromlocal(self.opener("localtags").read())
309 318 # localtags are stored in the local character set
310 319 # while the internal tag table is stored in UTF-8
311 320 readtags(data.splitlines(), "localtags")
312 321 except IOError:
313 322 pass
314 323
315 324 self.tagscache = {}
316 325 for k,nh in globaltags.items():
317 326 n = nh[0]
318 327 if n != nullid:
319 328 self.tagscache[k] = n
320 329 self.tagscache['tip'] = self.changelog.tip()
321 330
322 331 return self.tagscache
323 332
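readtags() above consumes the same line format wherever it comes from: one "<40-hex-node> <tag>" pair per line, stored in UTF-8 in a committed .hgtags and in the local charset in the uncommitted localtags file. An entry looks like this (node invented for illustration):

    0123456789abcdef0123456789abcdef01234567 release-1.0

When the same tag appears on several heads, the nodes an entry overwrites (the h list) act as its supersession history: the tip-most reading wins unless the previously recorded global node supersedes it, or the two supersede each other and the global one has the longer history.
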
324 333 def _hgtagsnodes(self):
325 334 heads = self.heads()
326 335 heads.reverse()
327 336 last = {}
328 337 ret = []
329 338 for node in heads:
330 339 c = self.changectx(node)
331 340 rev = c.rev()
332 341 try:
333 342 fnode = c.filenode('.hgtags')
334 343 except revlog.LookupError:
335 344 continue
336 345 ret.append((rev, node, fnode))
337 346 if fnode in last:
338 347 ret[last[fnode]] = None
339 348 last[fnode] = len(ret) - 1
340 349 return [item for item in ret if item]
341 350
342 351 def tagslist(self):
343 352 '''return a list of tags ordered by revision'''
344 353 l = []
345 354 for t, n in self.tags().items():
346 355 try:
347 356 r = self.changelog.rev(n)
348 357 except:
349 358 r = -2 # sort to the beginning of the list if unknown
350 359 l.append((r, t, n))
351 360 l.sort()
352 361 return [(t, n) for r, t, n in l]
353 362
354 363 def nodetags(self, node):
355 364 '''return the tags associated with a node'''
356 365 if not self.nodetagscache:
357 366 self.nodetagscache = {}
358 367 for t, n in self.tags().items():
359 368 self.nodetagscache.setdefault(n, []).append(t)
360 369 return self.nodetagscache.get(node, [])
361 370
362 371 def _branchtags(self):
363 372 partial, last, lrev = self._readbranchcache()
364 373
365 374 tiprev = self.changelog.count() - 1
366 375 if lrev != tiprev:
367 376 self._updatebranchcache(partial, lrev+1, tiprev+1)
368 377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
369 378
370 379 return partial
371 380
372 381 def branchtags(self):
373 382 if self.branchcache is not None:
374 383 return self.branchcache
375 384
376 385 self.branchcache = {} # avoid recursion in changectx
377 386 partial = self._branchtags()
378 387
379 388 # the branch cache is stored on disk as UTF-8, but in the local
380 389 # charset internally
381 390 for k, v in partial.items():
382 391 self.branchcache[util.tolocal(k)] = v
383 392 return self.branchcache
384 393
385 394 def _readbranchcache(self):
386 395 partial = {}
387 396 try:
388 397 f = self.opener("branch.cache")
389 398 lines = f.read().split('\n')
390 399 f.close()
391 400 except (IOError, OSError):
392 401 return {}, nullid, nullrev
393 402
394 403 try:
395 404 last, lrev = lines.pop(0).split(" ", 1)
396 405 last, lrev = bin(last), int(lrev)
397 406 if not (lrev < self.changelog.count() and
398 407 self.changelog.node(lrev) == last): # sanity check
399 408 # invalidate the cache
400 409 raise ValueError('Invalid branch cache: unknown tip')
401 410 for l in lines:
402 411 if not l: continue
403 412 node, label = l.split(" ", 1)
404 413 partial[label.strip()] = bin(node)
405 414 except (KeyboardInterrupt, util.SignalInterrupt):
406 415 raise
407 416 except Exception, inst:
408 417 if self.ui.debugflag:
409 418 self.ui.warn(str(inst), '\n')
410 419 partial, last, lrev = {}, nullid, nullrev
411 420 return partial, last, lrev
412 421
413 422 def _writebranchcache(self, branches, tip, tiprev):
414 423 try:
415 424 f = self.opener("branch.cache", "w", atomictemp=True)
416 425 f.write("%s %s\n" % (hex(tip), tiprev))
417 426 for label, node in branches.iteritems():
418 427 f.write("%s %s\n" % (hex(node), label))
419 428 f.rename()
420 429 except (IOError, OSError):
421 430 pass
422 431
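_readbranchcache and _writebranchcache above agree on a small on-disk layout: the first line of branch.cache records the tip the cache was computed against as "<40-hex-tip> <tiprev>", and every following line maps "<40-hex-node> <branch label>". A sketch of the file (hashes invented for illustration):

    0123456789abcdef0123456789abcdef01234567 1042
    89abcdef0123456789abcdef0123456789abcdef default
    fedcba9876543210fedcba9876543210fedcba98 stable

If the recorded tip no longer matches the changelog, the sanity check discards the whole cache; otherwise _branchtags() only asks _updatebranchcache to scan the revisions added since the cached tip.
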
423 432 def _updatebranchcache(self, partial, start, end):
424 433 for r in xrange(start, end):
425 434 c = self.changectx(r)
426 435 b = c.branch()
427 436 partial[b] = c.node()
428 437
429 438 def lookup(self, key):
430 439 if key == '.':
431 440 key, second = self.dirstate.parents()
432 441 if key == nullid:
433 442 raise repo.RepoError(_("no revision checked out"))
434 443 if second != nullid:
435 444 self.ui.warn(_("warning: working directory has two parents, "
436 445 "tag '.' uses the first\n"))
437 446 elif key == 'null':
438 447 return nullid
439 448 n = self.changelog._match(key)
440 449 if n:
441 450 return n
442 451 if key in self.tags():
443 452 return self.tags()[key]
444 453 if key in self.branchtags():
445 454 return self.branchtags()[key]
446 455 n = self.changelog._partialmatch(key)
447 456 if n:
448 457 return n
449 458 raise repo.RepoError(_("unknown revision '%s'") % key)
450 459
451 460 def dev(self):
452 461 return os.lstat(self.path).st_dev
453 462
454 463 def local(self):
455 464 return True
456 465
457 466 def join(self, f):
458 467 return os.path.join(self.path, f)
459 468
460 469 def sjoin(self, f):
461 470 f = self.encodefn(f)
462 471 return os.path.join(self.spath, f)
463 472
464 473 def wjoin(self, f):
465 474 return os.path.join(self.root, f)
466 475
467 476 def file(self, f):
468 477 if f[0] == '/':
469 478 f = f[1:]
470 479 return filelog.filelog(self.sopener, f)
471 480
472 481 def changectx(self, changeid=None):
473 482 return context.changectx(self, changeid)
474 483
475 484 def workingctx(self):
476 485 return context.workingctx(self)
477 486
478 487 def parents(self, changeid=None):
479 488 '''
480 489 get list of changectxs for parents of changeid or working directory
481 490 '''
482 491 if changeid is None:
483 492 pl = self.dirstate.parents()
484 493 else:
485 494 n = self.changelog.lookup(changeid)
486 495 pl = self.changelog.parents(n)
487 496 if pl[1] == nullid:
488 497 return [self.changectx(pl[0])]
489 498 return [self.changectx(pl[0]), self.changectx(pl[1])]
490 499
491 500 def filectx(self, path, changeid=None, fileid=None):
492 501 """changeid can be a changeset revision, node, or tag.
493 502 fileid can be a file revision or node."""
494 503 return context.filectx(self, path, changeid, fileid)
495 504
496 505 def getcwd(self):
497 506 return self.dirstate.getcwd()
498 507
499 508 def pathto(self, f, cwd=None):
500 509 return self.dirstate.pathto(f, cwd)
501 510
502 511 def wfile(self, f, mode='r'):
503 512 return self.wopener(f, mode)
504 513
505 514 def _link(self, f):
506 515 return os.path.islink(self.wjoin(f))
507 516
508 517 def _filter(self, filter, filename, data):
509 518 if filter not in self.filterpats:
510 519 l = []
511 520 for pat, cmd in self.ui.configitems(filter):
512 521 mf = util.matcher(self.root, "", [pat], [], [])[1]
513 522 l.append((mf, cmd))
514 523 self.filterpats[filter] = l
515 524
516 525 for mf, cmd in self.filterpats[filter]:
517 526 if mf(filename):
518 527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
519 528 data = util.filter(data, cmd)
520 529 break
521 530
522 531 return data
523 532
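_filter drives the [encode] and [decode] hgrc sections used by wread and wwrite below: each entry maps a file pattern to a shell command, the first matching pattern wins, and the data is piped through the command by util.filter. A sketch of a configuration (the dos2unix/unix2dos commands are illustrative stand-ins for any filter program):

    [encode]
    # applied by wread(), i.e. working directory -> repository
    **.txt = dos2unix
    [decode]
    # applied by wwrite()/wwritedata(), i.e. repository -> working directory
    **.txt = unix2dos
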
524 533 def wread(self, filename):
525 534 if self._link(filename):
526 535 data = os.readlink(self.wjoin(filename))
527 536 else:
528 537 data = self.wopener(filename, 'r').read()
529 538 return self._filter("encode", filename, data)
530 539
531 540 def wwrite(self, filename, data, flags):
532 541 data = self._filter("decode", filename, data)
533 542 if "l" in flags:
534 543 f = self.wjoin(filename)
535 544 try:
536 545 os.unlink(f)
537 546 except OSError:
538 547 pass
539 548 d = os.path.dirname(f)
540 549 if not os.path.exists(d):
541 550 os.makedirs(d)
542 551 os.symlink(data, f)
543 552 else:
544 553 try:
545 554 if self._link(filename):
546 555 os.unlink(self.wjoin(filename))
547 556 except OSError:
548 557 pass
549 558 self.wopener(filename, 'w').write(data)
550 559 util.set_exec(self.wjoin(filename), "x" in flags)
551 560
552 561 def wwritedata(self, filename, data):
553 562 return self._filter("decode", filename, data)
554 563
555 564 def transaction(self):
556 565 tr = self.transhandle
557 566 if tr != None and tr.running():
558 567 return tr.nest()
559 568
560 569 # save dirstate for rollback
561 570 try:
562 571 ds = self.opener("dirstate").read()
563 572 except IOError:
564 573 ds = ""
565 574 self.opener("journal.dirstate", "w").write(ds)
566 575
567 576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
568 577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
569 578 tr = transaction.transaction(self.ui.warn, self.sopener,
570 579 self.sjoin("journal"),
571 580 aftertrans(renames))
572 581 self.transhandle = tr
573 582 return tr
574 583
575 584 def recover(self):
576 585 l = self.lock()
577 586 if os.path.exists(self.sjoin("journal")):
578 587 self.ui.status(_("rolling back interrupted transaction\n"))
579 588 transaction.rollback(self.sopener, self.sjoin("journal"))
580 589 self.reload()
581 590 return True
582 591 else:
583 592 self.ui.warn(_("no interrupted transaction available\n"))
584 593 return False
585 594
586 595 def rollback(self, wlock=None, lock=None):
587 596 if not wlock:
588 597 wlock = self.wlock()
589 598 if not lock:
590 599 lock = self.lock()
591 600 if os.path.exists(self.sjoin("undo")):
592 601 self.ui.status(_("rolling back last transaction\n"))
593 602 transaction.rollback(self.sopener, self.sjoin("undo"))
594 603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
595 604 self.reload()
596 605 self.wreload()
597 606 else:
598 607 self.ui.warn(_("no rollback information available\n"))
599 608
600 609 def wreload(self):
601 610 self.dirstate.reload()
602 611
603 612 def reload(self):
604 613 self.changelog.load()
605 614 self.manifest.load()
606 615 self.tagscache = None
607 616 self.nodetagscache = None
608 617
609 618 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
610 619 desc=None):
611 620 try:
612 621 l = lock.lock(lockname, 0, releasefn, desc=desc)
613 622 except lock.LockHeld, inst:
614 623 if not wait:
615 624 raise
616 625 self.ui.warn(_("waiting for lock on %s held by %r\n") %
617 626 (desc, inst.locker))
618 627 # default to 600 seconds timeout
619 628 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
620 629 releasefn, desc=desc)
621 630 if acquirefn:
622 631 acquirefn()
623 632 return l
624 633
625 634 def lock(self, wait=1):
626 635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
627 636 desc=_('repository %s') % self.origroot)
628 637
629 638 def wlock(self, wait=1):
630 639 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
631 640 self.wreload,
632 641 desc=_('working directory of %s') % self.origroot)
633 642
634 643 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
635 644 """
636 645 commit an individual file as part of a larger transaction
637 646 """
638 647
639 648 t = self.wread(fn)
640 649 fl = self.file(fn)
641 650 fp1 = manifest1.get(fn, nullid)
642 651 fp2 = manifest2.get(fn, nullid)
643 652
644 653 meta = {}
645 654 cp = self.dirstate.copied(fn)
646 655 if cp:
647 656 # Mark the new revision of this file as a copy of another
648 657 # file. This copy data will effectively act as a parent
649 658 # of this new revision. If this is a merge, the first
650 659 # parent will be the nullid (meaning "look up the copy data")
651 660 # and the second one will be the other parent. For example:
652 661 #
653 662 # 0 --- 1 --- 3 rev1 changes file foo
654 663 # \ / rev2 renames foo to bar and changes it
655 664 # \- 2 -/ rev3 should have bar with all changes and
656 665 # should record that bar descends from
657 666 # bar in rev2 and foo in rev1
658 667 #
659 668 # this allows this merge to succeed:
660 669 #
661 670 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
662 671 # \ / merging rev3 and rev4 should use bar@rev2
663 672 # \- 2 --- 4 as the merge base
664 673 #
665 674 meta["copy"] = cp
666 675 if not manifest2: # not a branch merge
667 676 meta["copyrev"] = hex(manifest1.get(cp, nullid))
668 677 fp2 = nullid
669 678 elif fp2 != nullid: # copied on remote side
670 679 meta["copyrev"] = hex(manifest1.get(cp, nullid))
671 680 elif fp1 != nullid: # copied on local side, reversed
672 681 meta["copyrev"] = hex(manifest2.get(cp))
673 682 fp2 = fp1
674 683 else: # directory rename
675 684 meta["copyrev"] = hex(manifest1.get(cp, nullid))
676 685 self.ui.debug(_(" %s: copy %s:%s\n") %
677 686 (fn, cp, meta["copyrev"]))
678 687 fp1 = nullid
679 688 elif fp2 != nullid:
680 689 # is one parent an ancestor of the other?
681 690 fpa = fl.ancestor(fp1, fp2)
682 691 if fpa == fp1:
683 692 fp1, fp2 = fp2, nullid
684 693 elif fpa == fp2:
685 694 fp2 = nullid
686 695
687 696 # is the file unmodified from the parent? report existing entry
688 697 if fp2 == nullid and not fl.cmp(fp1, t):
689 698 return fp1
690 699
691 700 changelist.append(fn)
692 701 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
693 702
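The meta dict built above is the only copy-tracking record a filelog revision carries; for a simple local copy it ends up holding just two keys (path and hash invented for illustration):

    meta = {
        "copy": "foo",           # path the file was copied/renamed from
        "copyrev": "ab" * 20,    # hex filenode of the source (40 hex chars)
    }

Setting fp1 to nullid in the merge cases is what later tells readers to "look up the copy data" instead of following a normal first parent.
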
694 703 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
695 704 if p1 is None:
696 705 p1, p2 = self.dirstate.parents()
697 706 return self.commit(files=files, text=text, user=user, date=date,
698 707 p1=p1, p2=p2, wlock=wlock, extra=extra)
699 708
700 709 def commit(self, files=None, text="", user=None, date=None,
701 710 match=util.always, force=False, lock=None, wlock=None,
702 711 force_editor=False, p1=None, p2=None, extra={}):
703 712
704 713 commit = []
705 714 remove = []
706 715 changed = []
707 716 use_dirstate = (p1 is None) # not rawcommit
708 717 extra = extra.copy()
709 718
710 719 if use_dirstate:
711 720 if files:
712 721 for f in files:
713 722 s = self.dirstate.state(f)
714 723 if s in 'nmai':
715 724 commit.append(f)
716 725 elif s == 'r':
717 726 remove.append(f)
718 727 else:
719 728 self.ui.warn(_("%s not tracked!\n") % f)
720 729 else:
721 730 changes = self.status(match=match)[:5]
722 731 modified, added, removed, deleted, unknown = changes
723 732 commit = modified + added
724 733 remove = removed
725 734 else:
726 735 commit = files
727 736
728 737 if use_dirstate:
729 738 p1, p2 = self.dirstate.parents()
730 739 update_dirstate = True
731 740 else:
732 741 p1, p2 = p1, p2 or nullid
733 742 update_dirstate = (self.dirstate.parents()[0] == p1)
734 743
735 744 c1 = self.changelog.read(p1)
736 745 c2 = self.changelog.read(p2)
737 746 m1 = self.manifest.read(c1[0]).copy()
738 747 m2 = self.manifest.read(c2[0])
739 748
740 749 if use_dirstate:
741 750 branchname = self.workingctx().branch()
742 751 try:
743 752 branchname = branchname.decode('UTF-8').encode('UTF-8')
744 753 except UnicodeDecodeError:
745 754 raise util.Abort(_('branch name not in UTF-8!'))
746 755 else:
747 756 branchname = ""
748 757
749 758 if use_dirstate:
750 759 oldname = c1[5].get("branch") # stored in UTF-8
751 760 if not commit and not remove and not force and p2 == nullid and \
752 761 branchname == oldname:
753 762 self.ui.status(_("nothing changed\n"))
754 763 return None
755 764
756 765 xp1 = hex(p1)
757 766 if p2 == nullid: xp2 = ''
758 767 else: xp2 = hex(p2)
759 768
760 769 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
761 770
762 771 if not wlock:
763 772 wlock = self.wlock()
764 773 if not lock:
765 774 lock = self.lock()
766 775 tr = self.transaction()
767 776
768 777 # check in files
769 778 new = {}
770 779 linkrev = self.changelog.count()
771 780 commit.sort()
772 781 is_exec = util.execfunc(self.root, m1.execf)
773 782 is_link = util.linkfunc(self.root, m1.linkf)
774 783 for f in commit:
775 784 self.ui.note(f + "\n")
776 785 try:
777 786 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
778 787 new_exec = is_exec(f)
779 788 new_link = is_link(f)
780 789 if not changed or changed[-1] != f:
781 790 # mention the file in the changelog if some flag changed,
782 791 # even if there was no content change.
783 792 old_exec = m1.execf(f)
784 793 old_link = m1.linkf(f)
785 794 if old_exec != new_exec or old_link != new_link:
786 795 changed.append(f)
787 796 m1.set(f, new_exec, new_link)
788 797 except (OSError, IOError):
789 798 if use_dirstate:
790 799 self.ui.warn(_("trouble committing %s!\n") % f)
791 800 raise
792 801 else:
793 802 remove.append(f)
794 803
795 804 # update manifest
796 805 m1.update(new)
797 806 remove.sort()
798 807 removed = []
799 808
800 809 for f in remove:
801 810 if f in m1:
802 811 del m1[f]
803 812 removed.append(f)
804 813 elif f in m2:
805 814 removed.append(f)
806 815 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
807 816
808 817 # add changeset
809 818 new = new.keys()
810 819 new.sort()
811 820
812 821 user = user or self.ui.username()
813 822 if not text or force_editor:
814 823 edittext = []
815 824 if text:
816 825 edittext.append(text)
817 826 edittext.append("")
818 827 edittext.append("HG: user: %s" % user)
819 828 if p2 != nullid:
820 829 edittext.append("HG: branch merge")
821 830 if branchname:
822 831 edittext.append("HG: branch %s" % util.tolocal(branchname))
823 832 edittext.extend(["HG: changed %s" % f for f in changed])
824 833 edittext.extend(["HG: removed %s" % f for f in removed])
825 834 if not changed and not remove:
826 835 edittext.append("HG: no files changed")
827 836 edittext.append("")
828 837 # run editor in the repository root
829 838 olddir = os.getcwd()
830 839 os.chdir(self.root)
831 840 text = self.ui.edit("\n".join(edittext), user)
832 841 os.chdir(olddir)
833 842
834 843 lines = [line.rstrip() for line in text.rstrip().splitlines()]
835 844 while lines and not lines[0]:
836 845 del lines[0]
837 846 if not lines:
838 847 return None
839 848 text = '\n'.join(lines)
840 849 if branchname:
841 850 extra["branch"] = branchname
842 851 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
843 852 user, date, extra)
844 853 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
845 854 parent2=xp2)
846 855 tr.close()
847 856
848 857 if self.branchcache and "branch" in extra:
849 858 self.branchcache[util.tolocal(extra["branch"])] = n
850 859
851 860 if use_dirstate or update_dirstate:
852 861 self.dirstate.setparents(n)
853 862 if use_dirstate:
854 863 self.dirstate.update(new, "n")
855 864 self.dirstate.forget(removed)
856 865
857 866 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
858 867 return n
859 868
860 869 def walk(self, node=None, files=[], match=util.always, badmatch=None):
861 870 '''
862 871 walk recursively through the directory tree or a given
863 872 changeset, finding all files matched by the match
864 873 function
865 874
866 875 results are yielded in a tuple (src, filename), where src
867 876 is one of:
868 877 'f' the file was found in the directory tree
869 878 'm' the file was only in the dirstate and not in the tree
870 879 'b' file was not found and matched badmatch
871 880 '''
872 881
873 882 if node:
874 883 fdict = dict.fromkeys(files)
875 884 # for dirstate.walk, files=['.'] means "walk the whole tree".
876 885 # follow that here, too
877 886 fdict.pop('.', None)
878 887 mdict = self.manifest.read(self.changelog.read(node)[0])
879 888 mfiles = mdict.keys()
880 889 mfiles.sort()
881 890 for fn in mfiles:
882 891 for ffn in fdict:
883 892 # match if the file is the exact name or a directory
884 893 if ffn == fn or fn.startswith("%s/" % ffn):
885 894 del fdict[ffn]
886 895 break
887 896 if match(fn):
888 897 yield 'm', fn
889 898 ffiles = fdict.keys()
890 899 ffiles.sort()
891 900 for fn in ffiles:
892 901 if badmatch and badmatch(fn):
893 902 if match(fn):
894 903 yield 'b', fn
895 904 else:
896 905 self.ui.warn(_('%s: No such file in rev %s\n')
897 906 % (self.pathto(fn), short(node)))
898 907 else:
899 908 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
900 909 yield src, fn
901 910
902 911 def status(self, node1=None, node2=None, files=[], match=util.always,
903 912 wlock=None, list_ignored=False, list_clean=False):
904 913 """return status of files between two nodes or node and working directory
905 914
906 915 If node1 is None, use the first dirstate parent instead.
907 916 If node2 is None, compare node1 with working directory.
908 917 """
909 918
910 919 def fcmp(fn, getnode):
911 920 t1 = self.wread(fn)
912 921 return self.file(fn).cmp(getnode(fn), t1)
913 922
914 923 def mfmatches(node):
915 924 change = self.changelog.read(node)
916 925 mf = self.manifest.read(change[0]).copy()
917 926 for fn in mf.keys():
918 927 if not match(fn):
919 928 del mf[fn]
920 929 return mf
921 930
922 931 modified, added, removed, deleted, unknown = [], [], [], [], []
923 932 ignored, clean = [], []
924 933
925 934 compareworking = False
926 935 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
927 936 compareworking = True
928 937
929 938 if not compareworking:
930 939 # read the manifest from node1 before the manifest from node2,
931 940 # so that we'll hit the manifest cache if we're going through
932 941 # all the revisions in parent->child order.
933 942 mf1 = mfmatches(node1)
934 943
935 944 mywlock = False
936 945
937 946 # are we comparing the working directory?
938 947 if not node2:
939 948 (lookup, modified, added, removed, deleted, unknown,
940 949 ignored, clean) = self.dirstate.status(files, match,
941 950 list_ignored, list_clean)
942 951
943 952 # are we comparing working dir against its parent?
944 953 if compareworking:
945 954 if lookup:
946 955 # do a full compare of any files that might have changed
947 956 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
948 957 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
949 958 nullid)
950 959 for f in lookup:
951 960 if fcmp(f, getnode):
952 961 modified.append(f)
953 962 else:
954 963 clean.append(f)
955 964 if not wlock and not mywlock:
956 965 mywlock = True
957 966 try:
958 967 wlock = self.wlock(wait=0)
959 968 except lock.LockException:
960 969 pass
961 970 if wlock:
962 971 self.dirstate.update([f], "n")
963 972 else:
964 973 # we are comparing working dir against non-parent
965 974 # generate a pseudo-manifest for the working dir
966 975 # XXX: create it in dirstate.py ?
967 976 mf2 = mfmatches(self.dirstate.parents()[0])
968 977 is_exec = util.execfunc(self.root, mf2.execf)
969 978 is_link = util.linkfunc(self.root, mf2.linkf)
970 979 for f in lookup + modified + added:
971 980 mf2[f] = ""
972 981 mf2.set(f, is_exec(f), is_link(f))
973 982 for f in removed:
974 983 if f in mf2:
975 984 del mf2[f]
976 985
977 986 if mywlock and wlock:
978 987 wlock.release()
979 988 else:
980 989 # we are comparing two revisions
981 990 mf2 = mfmatches(node2)
982 991
983 992 if not compareworking:
984 993 # flush lists from dirstate before comparing manifests
985 994 modified, added, clean = [], [], []
986 995
987 996 # make sure to sort the files so we talk to the disk in a
988 997 # reasonable order
989 998 mf2keys = mf2.keys()
990 999 mf2keys.sort()
991 1000 getnode = lambda fn: mf1.get(fn, nullid)
992 1001 for fn in mf2keys:
993 1002 if mf1.has_key(fn):
994 1003 if mf1.flags(fn) != mf2.flags(fn) or \
995 1004 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
996 1005 fcmp(fn, getnode))):
997 1006 modified.append(fn)
998 1007 elif list_clean:
999 1008 clean.append(fn)
1000 1009 del mf1[fn]
1001 1010 else:
1002 1011 added.append(fn)
1003 1012
1004 1013 removed = mf1.keys()
1005 1014
1006 1015 # sort and return results:
1007 1016 for l in modified, added, removed, deleted, unknown, ignored, clean:
1008 1017 l.sort()
1009 1018 return (modified, added, removed, deleted, unknown, ignored, clean)
1010 1019
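Callers unpack the seven sorted lists positionally; ignored and clean stay empty unless explicitly requested. A typical call looks like this (sketch):

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(list_ignored=True, list_clean=True)
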
1011 1020 def add(self, list, wlock=None):
1012 1021 if not wlock:
1013 1022 wlock = self.wlock()
1014 1023 for f in list:
1015 1024 p = self.wjoin(f)
1016 1025 islink = os.path.islink(p)
1017 1026 size = os.lstat(p).st_size
1018 1027 if size > 10000000:
1019 1028 self.ui.warn(_("%s: files over 10MB may cause memory and"
1020 1029 " performance problems\n"
1021 1030 "(use 'hg revert %s' to unadd the file)\n")
1022 1031 % (f, f))
1023 1032 if not islink and not os.path.exists(p):
1024 1033 self.ui.warn(_("%s does not exist!\n") % f)
1025 1034 elif not islink and not os.path.isfile(p):
1026 1035 self.ui.warn(_("%s not added: only files and symlinks "
1027 1036 "supported currently\n") % f)
1028 1037 elif self.dirstate.state(f) in 'an':
1029 1038 self.ui.warn(_("%s already tracked!\n") % f)
1030 1039 else:
1031 1040 self.dirstate.update([f], "a")
1032 1041
1033 1042 def forget(self, list, wlock=None):
1034 1043 if not wlock:
1035 1044 wlock = self.wlock()
1036 1045 for f in list:
1037 1046 if self.dirstate.state(f) not in 'ai':
1038 1047 self.ui.warn(_("%s not added!\n") % f)
1039 1048 else:
1040 1049 self.dirstate.forget([f])
1041 1050
1042 1051 def remove(self, list, unlink=False, wlock=None):
1043 1052 if unlink:
1044 1053 for f in list:
1045 1054 try:
1046 1055 util.unlink(self.wjoin(f))
1047 1056 except OSError, inst:
1048 1057 if inst.errno != errno.ENOENT:
1049 1058 raise
1050 1059 if not wlock:
1051 1060 wlock = self.wlock()
1052 1061 for f in list:
1053 1062 if unlink and os.path.exists(self.wjoin(f)):
1054 1063 self.ui.warn(_("%s still exists!\n") % f)
1055 1064 elif self.dirstate.state(f) == 'a':
1056 1065 self.dirstate.forget([f])
1057 1066 elif f not in self.dirstate:
1058 1067 self.ui.warn(_("%s not tracked!\n") % f)
1059 1068 else:
1060 1069 self.dirstate.update([f], "r")
1061 1070
1062 1071 def undelete(self, list, wlock=None):
1063 1072 p = self.dirstate.parents()[0]
1064 1073 mn = self.changelog.read(p)[0]
1065 1074 m = self.manifest.read(mn)
1066 1075 if not wlock:
1067 1076 wlock = self.wlock()
1068 1077 for f in list:
1069 1078 if self.dirstate.state(f) not in "r":
1070 1079 self.ui.warn("%s not removed!\n" % f)
1071 1080 else:
1072 1081 t = self.file(f).read(m[f])
1073 1082 self.wwrite(f, t, m.flags(f))
1074 1083 self.dirstate.update([f], "n")
1075 1084
1076 1085 def copy(self, source, dest, wlock=None):
1077 1086 p = self.wjoin(dest)
1078 1087 if not (os.path.exists(p) or os.path.islink(p)):
1079 1088 self.ui.warn(_("%s does not exist!\n") % dest)
1080 1089 elif not (os.path.isfile(p) or os.path.islink(p)):
1081 1090 self.ui.warn(_("copy failed: %s is not a file or a "
1082 1091 "symbolic link\n") % dest)
1083 1092 else:
1084 1093 if not wlock:
1085 1094 wlock = self.wlock()
1086 1095 if self.dirstate.state(dest) == '?':
1087 1096 self.dirstate.update([dest], "a")
1088 1097 self.dirstate.copy(source, dest)
1089 1098
1090 1099 def heads(self, start=None):
1091 1100 heads = self.changelog.heads(start)
1092 1101 # sort the output in rev descending order
1093 1102 heads = [(-self.changelog.rev(h), h) for h in heads]
1094 1103 heads.sort()
1095 1104 return [n for (r, n) in heads]
1096 1105
1097 1106 def branches(self, nodes):
1098 1107 if not nodes:
1099 1108 nodes = [self.changelog.tip()]
1100 1109 b = []
1101 1110 for n in nodes:
1102 1111 t = n
1103 1112 while 1:
1104 1113 p = self.changelog.parents(n)
1105 1114 if p[1] != nullid or p[0] == nullid:
1106 1115 b.append((t, n, p[0], p[1]))
1107 1116 break
1108 1117 n = p[0]
1109 1118 return b
1110 1119
1111 1120 def between(self, pairs):
1112 1121 r = []
1113 1122
1114 1123 for top, bottom in pairs:
1115 1124 n, l, i = top, [], 0
1116 1125 f = 1
1117 1126
1118 1127 while n != bottom:
1119 1128 p = self.changelog.parents(n)[0]
1120 1129 if i == f:
1121 1130 l.append(n)
1122 1131 f = f * 2
1123 1132 n = p
1124 1133 i += 1
1125 1134
1126 1135 r.append(l)
1127 1136
1128 1137 return r
1129 1138
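For each (top, bottom) pair, between() walks first parents down from top and keeps the nodes at exponentially growing distances (1, 2, 4, 8, ...); findincoming below relies on this spacing to binary-search a long unknown branch in few round trips. The same loop run standalone on a toy linear history, where the parent of n is n - 1:

    def sample_between(top, bottom, parent):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            p = parent(n)
            if i == f:
                l.append(n)
                f = f * 2
            n = p
            i += 1
        return l

    sample_between(10, 0, lambda n: n - 1)   # -> [9, 8, 6, 2]
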
1130 1139 def findincoming(self, remote, base=None, heads=None, force=False):
1131 1140 """Return list of roots of the subsets of missing nodes from remote
1132 1141
1133 1142 If base dict is specified, assume that these nodes and their parents
1134 1143 exist on the remote side and that no child of a node of base exists
1135 1144 in both remote and self.
1136 1145 Furthermore, base will be updated to include the nodes that exist
1137 1146 in self and remote but whose children do not exist in self and remote.
1138 1147 If a list of heads is specified, return only nodes which are heads
1139 1148 or ancestors of these heads.
1140 1149
1141 1150 All the ancestors of base are in self and in remote.
1142 1151 All the descendants of the list returned are missing in self.
1143 1152 (and so we know that the rest of the nodes are missing in remote, see
1144 1153 outgoing)
1145 1154 """
1146 1155 m = self.changelog.nodemap
1147 1156 search = []
1148 1157 fetch = {}
1149 1158 seen = {}
1150 1159 seenbranch = {}
1151 1160 if base == None:
1152 1161 base = {}
1153 1162
1154 1163 if not heads:
1155 1164 heads = remote.heads()
1156 1165
1157 1166 if self.changelog.tip() == nullid:
1158 1167 base[nullid] = 1
1159 1168 if heads != [nullid]:
1160 1169 return [nullid]
1161 1170 return []
1162 1171
1163 1172 # assume we're closer to the tip than the root
1164 1173 # and start by examining the heads
1165 1174 self.ui.status(_("searching for changes\n"))
1166 1175
1167 1176 unknown = []
1168 1177 for h in heads:
1169 1178 if h not in m:
1170 1179 unknown.append(h)
1171 1180 else:
1172 1181 base[h] = 1
1173 1182
1174 1183 if not unknown:
1175 1184 return []
1176 1185
1177 1186 req = dict.fromkeys(unknown)
1178 1187 reqcnt = 0
1179 1188
1180 1189 # search through remote branches
1181 1190 # a 'branch' here is a linear segment of history, with four parts:
1182 1191 # head, root, first parent, second parent
1183 1192 # (a branch always has two parents (or none) by definition)
1184 1193 unknown = remote.branches(unknown)
1185 1194 while unknown:
1186 1195 r = []
1187 1196 while unknown:
1188 1197 n = unknown.pop(0)
1189 1198 if n[0] in seen:
1190 1199 continue
1191 1200
1192 1201 self.ui.debug(_("examining %s:%s\n")
1193 1202 % (short(n[0]), short(n[1])))
1194 1203 if n[0] == nullid: # found the end of the branch
1195 1204 pass
1196 1205 elif n in seenbranch:
1197 1206 self.ui.debug(_("branch already found\n"))
1198 1207 continue
1199 1208 elif n[1] and n[1] in m: # do we know the base?
1200 1209 self.ui.debug(_("found incomplete branch %s:%s\n")
1201 1210 % (short(n[0]), short(n[1])))
1202 1211 search.append(n) # schedule branch range for scanning
1203 1212 seenbranch[n] = 1
1204 1213 else:
1205 1214 if n[1] not in seen and n[1] not in fetch:
1206 1215 if n[2] in m and n[3] in m:
1207 1216 self.ui.debug(_("found new changeset %s\n") %
1208 1217 short(n[1]))
1209 1218 fetch[n[1]] = 1 # earliest unknown
1210 1219 for p in n[2:4]:
1211 1220 if p in m:
1212 1221 base[p] = 1 # latest known
1213 1222
1214 1223 for p in n[2:4]:
1215 1224 if p not in req and p not in m:
1216 1225 r.append(p)
1217 1226 req[p] = 1
1218 1227 seen[n[0]] = 1
1219 1228
1220 1229 if r:
1221 1230 reqcnt += 1
1222 1231 self.ui.debug(_("request %d: %s\n") %
1223 1232 (reqcnt, " ".join(map(short, r))))
1224 1233 for p in xrange(0, len(r), 10):
1225 1234 for b in remote.branches(r[p:p+10]):
1226 1235 self.ui.debug(_("received %s:%s\n") %
1227 1236 (short(b[0]), short(b[1])))
1228 1237 unknown.append(b)
1229 1238
1230 1239 # do binary search on the branches we found
1231 1240 while search:
1232 1241 n = search.pop(0)
1233 1242 reqcnt += 1
1234 1243 l = remote.between([(n[0], n[1])])[0]
1235 1244 l.append(n[1])
1236 1245 p = n[0]
1237 1246 f = 1
1238 1247 for i in l:
1239 1248 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1240 1249 if i in m:
1241 1250 if f <= 2:
1242 1251 self.ui.debug(_("found new branch changeset %s\n") %
1243 1252 short(p))
1244 1253 fetch[p] = 1
1245 1254 base[i] = 1
1246 1255 else:
1247 1256 self.ui.debug(_("narrowed branch search to %s:%s\n")
1248 1257 % (short(p), short(i)))
1249 1258 search.append((p, i))
1250 1259 break
1251 1260 p, f = i, f * 2
1252 1261
1253 1262 # sanity check our fetch list
1254 1263 for f in fetch.keys():
1255 1264 if f in m:
1256 1265 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1257 1266
1258 1267 if base.keys() == [nullid]:
1259 1268 if force:
1260 1269 self.ui.warn(_("warning: repository is unrelated\n"))
1261 1270 else:
1262 1271 raise util.Abort(_("repository is unrelated"))
1263 1272
1264 1273 self.ui.debug(_("found new changesets starting at ") +
1265 1274 " ".join([short(f) for f in fetch]) + "\n")
1266 1275
1267 1276 self.ui.debug(_("%d total queries\n") % reqcnt)
1268 1277
1269 1278 return fetch.keys()
1270 1279
1271 1280 def findoutgoing(self, remote, base=None, heads=None, force=False):
1272 1281 """Return list of nodes that are roots of subsets not in remote
1273 1282
1274 1283 If base dict is specified, assume that these nodes and their parents
1275 1284 exist on the remote side.
1276 1285 If a list of heads is specified, return only nodes which are heads
1277 1286 or ancestors of these heads, and return a second element which
1278 1287 contains all remote heads which get new children.
1279 1288 """
1280 1289 if base == None:
1281 1290 base = {}
1282 1291 self.findincoming(remote, base, heads, force=force)
1283 1292
1284 1293 self.ui.debug(_("common changesets up to ")
1285 1294 + " ".join(map(short, base.keys())) + "\n")
1286 1295
1287 1296 remain = dict.fromkeys(self.changelog.nodemap)
1288 1297
1289 1298 # prune everything remote has from the tree
1290 1299 del remain[nullid]
1291 1300 remove = base.keys()
1292 1301 while remove:
1293 1302 n = remove.pop(0)
1294 1303 if n in remain:
1295 1304 del remain[n]
1296 1305 for p in self.changelog.parents(n):
1297 1306 remove.append(p)
1298 1307
1299 1308 # find every node whose parents have been pruned
1300 1309 subset = []
1301 1310 # find every remote head that will get new children
1302 1311 updated_heads = {}
1303 1312 for n in remain:
1304 1313 p1, p2 = self.changelog.parents(n)
1305 1314 if p1 not in remain and p2 not in remain:
1306 1315 subset.append(n)
1307 1316 if heads:
1308 1317 if p1 in heads:
1309 1318 updated_heads[p1] = True
1310 1319 if p2 in heads:
1311 1320 updated_heads[p2] = True
1312 1321
1313 1322 # this is the set of all roots we have to push
1314 1323 if heads:
1315 1324 return subset, updated_heads.keys()
1316 1325 else:
1317 1326 return subset
1318 1327
1319 1328 def pull(self, remote, heads=None, force=False, lock=None):
1320 1329 mylock = False
1321 1330 if not lock:
1322 1331 lock = self.lock()
1323 1332 mylock = True
1324 1333
1325 1334 try:
1326 1335 fetch = self.findincoming(remote, force=force)
1327 1336 if fetch == [nullid]:
1328 1337 self.ui.status(_("requesting all changes\n"))
1329 1338
1330 1339 if not fetch:
1331 1340 self.ui.status(_("no changes found\n"))
1332 1341 return 0
1333 1342
1334 1343 if heads is None:
1335 1344 cg = remote.changegroup(fetch, 'pull')
1336 1345 else:
1337 1346 if 'changegroupsubset' not in remote.capabilities:
1338 1347 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1339 1348 cg = remote.changegroupsubset(fetch, heads, 'pull')
1340 1349 return self.addchangegroup(cg, 'pull', remote.url())
1341 1350 finally:
1342 1351 if mylock:
1343 1352 lock.release()
1344 1353
1345 1354 def push(self, remote, force=False, revs=None):
1346 1355 # there are two ways to push to remote repo:
1347 1356 #
1348 1357 # addchangegroup assumes local user can lock remote
1349 1358 # repo (local filesystem, old ssh servers).
1350 1359 #
1351 1360 # unbundle assumes local user cannot lock remote repo (new ssh
1352 1361 # servers, http servers).
1353 1362
1354 1363 if remote.capable('unbundle'):
1355 1364 return self.push_unbundle(remote, force, revs)
1356 1365 return self.push_addchangegroup(remote, force, revs)
1357 1366
1358 1367 def prepush(self, remote, force, revs):
1359 1368 base = {}
1360 1369 remote_heads = remote.heads()
1361 1370 inc = self.findincoming(remote, base, remote_heads, force=force)
1362 1371
1363 1372 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1364 1373 if revs is not None:
1365 1374 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1366 1375 else:
1367 1376 bases, heads = update, self.changelog.heads()
1368 1377
1369 1378 if not bases:
1370 1379 self.ui.status(_("no changes found\n"))
1371 1380 return None, 1
1372 1381 elif not force:
1373 1382 # check if we're creating new remote heads
1374 1383 # to be a remote head after push, node must be either
1375 1384 # - unknown locally
1376 1385 # - a local outgoing head descended from update
1377 1386 # - a remote head that's known locally and not
1378 1387 # ancestral to an outgoing head
1379 1388
1380 1389 warn = 0
1381 1390
1382 1391 if remote_heads == [nullid]:
1383 1392 warn = 0
1384 1393 elif not revs and len(heads) > len(remote_heads):
1385 1394 warn = 1
1386 1395 else:
1387 1396 newheads = list(heads)
1388 1397 for r in remote_heads:
1389 1398 if r in self.changelog.nodemap:
1390 1399 desc = self.changelog.heads(r, heads)
1391 1400 l = [h for h in heads if h in desc]
1392 1401 if not l:
1393 1402 newheads.append(r)
1394 1403 else:
1395 1404 newheads.append(r)
1396 1405 if len(newheads) > len(remote_heads):
1397 1406 warn = 1
1398 1407
1399 1408 if warn:
1400 1409 self.ui.warn(_("abort: push creates new remote branches!\n"))
1401 1410 self.ui.status(_("(did you forget to merge?"
1402 1411 " use push -f to force)\n"))
1403 1412 return None, 1
1404 1413 elif inc:
1405 1414 self.ui.warn(_("note: unsynced remote changes!\n"))
1406 1415
1407 1416
1408 1417 if revs is None:
1409 1418 cg = self.changegroup(update, 'push')
1410 1419 else:
1411 1420 cg = self.changegroupsubset(update, revs, 'push')
1412 1421 return cg, remote_heads
1413 1422
1414 1423 def push_addchangegroup(self, remote, force, revs):
1415 1424 lock = remote.lock()
1416 1425
1417 1426 ret = self.prepush(remote, force, revs)
1418 1427 if ret[0] is not None:
1419 1428 cg, remote_heads = ret
1420 1429 return remote.addchangegroup(cg, 'push', self.url())
1421 1430 return ret[1]
1422 1431
1423 1432 def push_unbundle(self, remote, force, revs):
1424 1433 # local repo finds heads on server, finds out what revs it
1425 1434 # must push. once revs transferred, if server finds it has
1426 1435 # different heads (someone else won commit/push race), server
1427 1436 # aborts.
1428 1437
1429 1438 ret = self.prepush(remote, force, revs)
1430 1439 if ret[0] is not None:
1431 1440 cg, remote_heads = ret
1432 1441 if force: remote_heads = ['force']
1433 1442 return remote.unbundle(cg, remote_heads, 'push')
1434 1443 return ret[1]
1435 1444
1436 1445 def changegroupinfo(self, nodes):
1437 1446 self.ui.note(_("%d changesets found\n") % len(nodes))
1438 1447 if self.ui.debugflag:
1439 1448 self.ui.debug(_("List of changesets:\n"))
1440 1449 for node in nodes:
1441 1450 self.ui.debug("%s\n" % hex(node))
1442 1451
1443 1452 def changegroupsubset(self, bases, heads, source):
1444 1453 """This function generates a changegroup consisting of all the nodes
1445 1454 that are descendants of any of the bases, and ancestors of any of
1446 1455 the heads.
1447 1456
1448 1457 It is fairly complex as determining which filenodes and which
1449 1458 manifest nodes need to be included for the changeset to be complete
1450 1459 is non-trivial.
1451 1460
1452 1461 Another wrinkle is doing the reverse, figuring out which changeset in
1453 1462 the changegroup a particular filenode or manifestnode belongs to."""
1454 1463
1455 1464 self.hook('preoutgoing', throw=True, source=source)
1456 1465
1457 1466 # Set up some initial variables
1458 1467 # Make it easy to refer to self.changelog
1459 1468 cl = self.changelog
1460 1469 # msng is short for missing - compute the list of changesets in this
1461 1470 # changegroup.
1462 1471 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1463 1472 self.changegroupinfo(msng_cl_lst)
1464 1473 # Some bases may turn out to be superfluous, and some heads may be
1465 1474 # too. nodesbetween will return the minimal set of bases and heads
1466 1475 # necessary to re-create the changegroup.
1467 1476
1468 1477 # Known heads are the list of heads that it is assumed the recipient
1469 1478 # of this changegroup will know about.
1470 1479 knownheads = {}
1471 1480 # We assume that all parents of bases are known heads.
1472 1481 for n in bases:
1473 1482 for p in cl.parents(n):
1474 1483 if p != nullid:
1475 1484 knownheads[p] = 1
1476 1485 knownheads = knownheads.keys()
1477 1486 if knownheads:
1478 1487 # Now that we know what heads are known, we can compute which
1479 1488 # changesets are known. The recipient must know about all
1480 1489 # changesets required to reach the known heads from the null
1481 1490 # changeset.
1482 1491 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1483 1492 junk = None
1484 1493 # Transform the list into an ersatz set.
1485 1494 has_cl_set = dict.fromkeys(has_cl_set)
1486 1495 else:
1487 1496 # If there were no known heads, the recipient cannot be assumed to
1488 1497 # know about any changesets.
1489 1498 has_cl_set = {}
1490 1499
1491 1500 # Make it easy to refer to self.manifest
1492 1501 mnfst = self.manifest
1493 1502 # We don't know which manifests are missing yet
1494 1503 msng_mnfst_set = {}
1495 1504 # Nor do we know which filenodes are missing.
1496 1505 msng_filenode_set = {}
1497 1506
1498 1507 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1499 1508 junk = None
1500 1509
1501 1510 # A changeset always belongs to itself, so the changenode lookup
1502 1511 # function for a changenode is identity.
1503 1512 def identity(x):
1504 1513 return x
1505 1514
1506 1515 # A function generating function. Sets up an environment for the
1507 1516 # inner function.
1508 1517 def cmp_by_rev_func(revlog):
1509 1518 # Compare two nodes by their revision number in the environment's
1510 1519 # revision history. Since the revision number both represents the
1511 1520 # most efficient order to read the nodes in, and represents a
1512 1521 # topological sorting of the nodes, this function is often useful.
1513 1522 def cmp_by_rev(a, b):
1514 1523 return cmp(revlog.rev(a), revlog.rev(b))
1515 1524 return cmp_by_rev
1516 1525
1517 1526 # If we determine that a particular file or manifest node must be a
1518 1527 # node that the recipient of the changegroup will already have, we can
1519 1528 # also assume the recipient will have all the parents. This function
1520 1529 # prunes them from the set of missing nodes.
1521 1530 def prune_parents(revlog, hasset, msngset):
1522 1531 haslst = hasset.keys()
1523 1532 haslst.sort(cmp_by_rev_func(revlog))
1524 1533 for node in haslst:
1525 1534 parentlst = [p for p in revlog.parents(node) if p != nullid]
1526 1535 while parentlst:
1527 1536 n = parentlst.pop()
1528 1537 if n not in hasset:
1529 1538 hasset[n] = 1
1530 1539 p = [p for p in revlog.parents(n) if p != nullid]
1531 1540 parentlst.extend(p)
1532 1541 for n in hasset:
1533 1542 msngset.pop(n, None)
1534 1543
1535 1544 # This is a function generating function used to set up an environment
1536 1545 # for the inner function to execute in.
1537 1546 def manifest_and_file_collector(changedfileset):
1538 1547 # This is an information gathering function that gathers
1539 1548 # information from each changeset node that goes out as part of
1540 1549 # the changegroup. The information gathered is a list of which
1541 1550 # manifest nodes are potentially required (the recipient may
1542 1551 # already have them) and total list of all files which were
1543 1552 # changed in any changeset in the changegroup.
1544 1553 #
1545 1554 # We also remember, for each manifest, the first changenode we
1546 1555 # saw that referenced it, so we can later determine which
1547 1556 # changenode 'owns' the manifest.
1548 1557 def collect_manifests_and_files(clnode):
1549 1558 c = cl.read(clnode)
1550 1559 for f in c[3]:
1551 1560 # This is to make sure we only have one instance of each
1552 1561 # filename string for each filename.
1553 1562 changedfileset.setdefault(f, f)
1554 1563 msng_mnfst_set.setdefault(c[0], clnode)
1555 1564 return collect_manifests_and_files
1556 1565
1557 1566 # Figure out which manifest nodes (of the ones we think might be part
1558 1567 # of the changegroup) the recipient must know about and remove them
1559 1568 # from the changegroup.
1560 1569 def prune_manifests():
1561 1570 has_mnfst_set = {}
1562 1571 for n in msng_mnfst_set:
1563 1572 # If a 'missing' manifest thinks it belongs to a changenode
1564 1573 # the recipient is assumed to have, obviously the recipient
1565 1574 # must have that manifest.
1566 1575 linknode = cl.node(mnfst.linkrev(n))
1567 1576 if linknode in has_cl_set:
1568 1577 has_mnfst_set[n] = 1
1569 1578 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1570 1579
1571 1580 # Use the information collected in collect_manifests_and_files to say
1572 1581 # which changenode any manifestnode belongs to.
1573 1582 def lookup_manifest_link(mnfstnode):
1574 1583 return msng_mnfst_set[mnfstnode]
1575 1584
1576 1585 # A function generating function that sets up the initial environment
1577 1586 # for the inner function.
1578 1587 def filenode_collector(changedfiles):
1579 1588 next_rev = [0]
1580 1589 # This gathers information from each manifestnode included in the
1581 1590 # changegroup about which filenodes the manifest node references
1582 1591 # so we can include those in the changegroup too.
1583 1592 #
1584 1593 # It also remembers which changenode each filenode belongs to. It
1585 1594 # does this by assuming that a filenode belongs to the same
1586 1595 # changenode as the first manifest that references it.
1587 1596 def collect_msng_filenodes(mnfstnode):
1588 1597 r = mnfst.rev(mnfstnode)
1589 1598 if r == next_rev[0]:
1590 1599 # If the last rev we looked at was the one just previous,
1591 1600 # we only need to see a diff.
1592 1601 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if msng_filenode_lst:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

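    # The changegroup stream produced above (and by changegroup() below) is
    # laid out as: the changelog group, then the manifest group, then, for
    # each changed file, a chunk carrying the filename followed by that
    # file's filenode group, with an empty chunk (closechunk) ending each
    # group and the stream as a whole.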
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

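    # On the wire each chunk is length-prefixed: a 4-byte big-endian integer
    # counting the prefix itself plus the payload.  The empty chunk emitted
    # by changegroup.closechunk() carries a length of 0, which is how
    # chunkiter (used in addchangegroup below) detects the end of a group.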
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
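        # Worked example: starting from one head, a group that adds a second
        # head returns 1 + 1 = 2; a group that only extends an existing head
        # leaves the count unchanged and returns 1; a group whose changesets
        # merge two existing heads into one returns -1 - 1 = -2.  Zero is
        # reserved for "nothing changed".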
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1  # rev number of the old tip
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1  # rev number of the new tip
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
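        # After the status line, the stream consumed below consists of a
        # header line '<total_files> <total_bytes>', then for each file a
        # line of the form '<store path>\0<size>' followed by exactly <size>
        # bytes of raw revlog data (e.g. a path such as 'data/foo.c.i').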
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

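        # For instance, 'hg clone --uncompressed' reaches this point with
        # stream=True and no heads, so it takes the stream_in() path when
        # the server advertises the 'stream' capability; everything else
        # falls back to an ordinary pull.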
        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
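# For example, transaction() hands aftertrans() the (journal, undo) rename
# pairs; the returned callback captures only plain path tuples, never the
# repository itself, so a pending transaction does not keep the repo object
# alive and its destructor can still run.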

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True