##// END OF EJS Templates
Make sure the changelog mentions files whose flags changed...
Alexis S. L. Carvalho -
r4530:0ac7fee4 default
parent child Browse files
Show More
@@ -1,1956 +1,1965
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
class localrepository(repo.repository):
    # Capabilities advertised to peers over the wire protocol.
    capabilities = ('lookup', 'changegroupsubset')
    # On-disk format requirements this class knows how to read.
    supported = ('revlogv1', 'store')

    def __del__(self):
        # Drop the reference to any in-flight transaction so it can be
        # released when the repository object goes away.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is None, walk upward from the current directory looking
        for a .hg directory.  Raises repo.RepoError when no repository
        is found, when a stored requirement is unsupported, or on
        create/already-exists conflicts.
        """
        repo.repository.__init__(self)
        if not path:
            # search upward for a directory containing .hg
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path  # path as given, before realpath()
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: old-style repository
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, revlogs live in
        # .hg/store with encoded filenames; otherwise directly in .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # missing per-repo hgrc is fine
            pass

        self.changelog = changelog.changelog(self.sopener)
        self.sopener.defversion = self.changelog.version
        self.manifest = manifest.manifest(self.sopener)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # lazily-populated caches
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
107 107 def url(self):
108 108 return 'file:' + self.root
109 109
    def hook(self, name, throw=False, **args):
        """Run all configured hooks whose name matches `name`.

        args become HG_* environment variables for shell hooks and
        keyword arguments for python hooks.  If throw is true, a
        failing hook raises util.Abort; otherwise failures only warn.
        Returns a true value if any hook reported failure.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            obj = funcname
            if not callable(obj):
                # funcname is a dotted "module.attr" string; resolve it
                d = funcname.rfind('.')
                if d == -1:
                    raise util.Abort(_('%s hook is invalid ("%s" not in '
                                       'a module)') % (hname, funcname))
                modname = funcname[:d]
                try:
                    obj = __import__(modname)
                except ImportError:
                    try:
                        # extensions are loaded with hgext_ prefix
                        obj = __import__("hgext_%s" % modname)
                    except ImportError:
                        raise util.Abort(_('%s hook is invalid '
                                           '(import of "%s" failed)') %
                                         (hname, modname))
                try:
                    # walk the dotted path down to the final attribute
                    for p in funcname.split('.')[1:]:
                        obj = getattr(obj, p)
                except AttributeError, err:
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not defined)') %
                                     (hname, funcname))
                if not callable(obj):
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not callable)') %
                                     (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                # show the traceback, then count the hook as failed
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # external (shell) hook: args are exported as HG_* env vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # both "name" and "name.suffix" entries match; run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if callable(cmd):
                r = callhook(hname, cmd) or r
            elif cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
194 194
195 195 tag_disallowed = ':\r\n'
196 196
197 197 def _tag(self, name, node, message, local, user, date, parent=None):
198 198 use_dirstate = parent is None
199 199
200 200 for c in self.tag_disallowed:
201 201 if c in name:
202 202 raise util.Abort(_('%r cannot be used in a tag name') % c)
203 203
204 204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
205 205
206 206 if local:
207 207 # local tags are stored in the current charset
208 208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
209 209 self.hook('tag', node=hex(node), tag=name, local=local)
210 210 return
211 211
212 212 # committed tags are stored in UTF-8
213 213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
214 214 if use_dirstate:
215 215 self.wfile('.hgtags', 'ab').write(line)
216 216 else:
217 217 ntags = self.filectx('.hgtags', parent).data()
218 218 self.wfile('.hgtags', 'ab').write(ntags + line)
219 219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
220 220 self.add(['.hgtags'])
221 221
222 222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
223 223
224 224 self.hook('tag', node=hex(node), tag=name, local=local)
225 225
226 226 return tagnode
227 227
228 228 def tag(self, name, node, message, local, user, date):
229 229 '''tag a revision with a symbolic name.
230 230
231 231 if local is True, the tag is stored in a per-repository file.
232 232 otherwise, it is stored in the .hgtags file, and a new
233 233 changeset is committed with the change.
234 234
235 235 keyword arguments:
236 236
237 237 local: whether to store tag in non-version-controlled file
238 238 (default False)
239 239
240 240 message: commit message to use if committing
241 241
242 242 user: name of user to use if committing
243 243
244 244 date: date tuple to use if committing'''
245 245
246 246 for x in self.status()[:5]:
247 247 if '.hgtags' in x:
248 248 raise util.Abort(_('working copy of .hgtags is changed '
249 249 '(please commit .hgtags manually)'))
250 250
251 251
252 252 self._tag(name, node, message, local, user, date)
253 253
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        # tag name -> (node, list of nodes this entry superseded)
        globaltags = {}

        def readtags(lines, fn):
            # parse one tags file and merge its entries into globaltags
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # a later entry for the same tag wins within one file;
                # keep the superseded nodes in the history list h
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k,nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if bn != an and an in bh and \
                   (bn not in ah or len(bh) > len(ah)):
                    an = bn
                # merge the two supersession histories
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            # reuse the previous filectx where possible
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            # no localtags file
            pass

        self.tagscache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # entries pointing at nullid are dropped (presumably tag
            # removals -- confirm against tag-deletion callers)
            if n != nullid:
                self.tagscache[k] = n
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
332 332
333 333 def _hgtagsnodes(self):
334 334 heads = self.heads()
335 335 heads.reverse()
336 336 last = {}
337 337 ret = []
338 338 for node in heads:
339 339 c = self.changectx(node)
340 340 rev = c.rev()
341 341 try:
342 342 fnode = c.filenode('.hgtags')
343 343 except revlog.LookupError:
344 344 continue
345 345 ret.append((rev, node, fnode))
346 346 if fnode in last:
347 347 ret[last[fnode]] = None
348 348 last[fnode] = len(ret) - 1
349 349 return [item for item in ret if item]
350 350
351 351 def tagslist(self):
352 352 '''return a list of tags ordered by revision'''
353 353 l = []
354 354 for t, n in self.tags().items():
355 355 try:
356 356 r = self.changelog.rev(n)
357 357 except:
358 358 r = -2 # sort to the beginning of the list if unknown
359 359 l.append((r, t, n))
360 360 l.sort()
361 361 return [(t, n) for r, t, n in l]
362 362
363 363 def nodetags(self, node):
364 364 '''return the tags associated with a node'''
365 365 if not self.nodetagscache:
366 366 self.nodetagscache = {}
367 367 for t, n in self.tags().items():
368 368 self.nodetagscache.setdefault(n, []).append(t)
369 369 return self.nodetagscache.get(node, [])
370 370
371 371 def _branchtags(self):
372 372 partial, last, lrev = self._readbranchcache()
373 373
374 374 tiprev = self.changelog.count() - 1
375 375 if lrev != tiprev:
376 376 self._updatebranchcache(partial, lrev+1, tiprev+1)
377 377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
378 378
379 379 return partial
380 380
381 381 def branchtags(self):
382 382 if self.branchcache is not None:
383 383 return self.branchcache
384 384
385 385 self.branchcache = {} # avoid recursion in changectx
386 386 partial = self._branchtags()
387 387
388 388 # the branch cache is stored on disk as UTF-8, but in the local
389 389 # charset internally
390 390 for k, v in partial.items():
391 391 self.branchcache[util.tolocal(k)] = v
392 392 return self.branchcache
393 393
    def _readbranchcache(self):
        """Parse .hg/branch.cache.

        Returns (branch -> node map, cached tip node, cached tip rev);
        on a missing or invalid cache, ({}, nullid, nullrev).
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records "<tip hex> <tip rev>" at write time
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "<node hex> <branch label>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: rebuild from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
421 421
422 422 def _writebranchcache(self, branches, tip, tiprev):
423 423 try:
424 424 f = self.opener("branch.cache", "w", atomictemp=True)
425 425 f.write("%s %s\n" % (hex(tip), tiprev))
426 426 for label, node in branches.iteritems():
427 427 f.write("%s %s\n" % (hex(node), label))
428 428 f.rename()
429 429 except (IOError, OSError):
430 430 pass
431 431
432 432 def _updatebranchcache(self, partial, start, end):
433 433 for r in xrange(start, end):
434 434 c = self.changectx(r)
435 435 b = c.branch()
436 436 partial[b] = c.node()
437 437
438 438 def lookup(self, key):
439 439 if key == '.':
440 440 key, second = self.dirstate.parents()
441 441 if key == nullid:
442 442 raise repo.RepoError(_("no revision checked out"))
443 443 if second != nullid:
444 444 self.ui.warn(_("warning: working directory has two parents, "
445 445 "tag '.' uses the first\n"))
446 446 elif key == 'null':
447 447 return nullid
448 448 n = self.changelog._match(key)
449 449 if n:
450 450 return n
451 451 if key in self.tags():
452 452 return self.tags()[key]
453 453 if key in self.branchtags():
454 454 return self.branchtags()[key]
455 455 n = self.changelog._partialmatch(key)
456 456 if n:
457 457 return n
458 458 raise repo.RepoError(_("unknown revision '%s'") % key)
459 459
460 460 def dev(self):
461 461 return os.lstat(self.path).st_dev
462 462
463 463 def local(self):
464 464 return True
465 465
466 466 def join(self, f):
467 467 return os.path.join(self.path, f)
468 468
469 469 def sjoin(self, f):
470 470 f = self.encodefn(f)
471 471 return os.path.join(self.spath, f)
472 472
473 473 def wjoin(self, f):
474 474 return os.path.join(self.root, f)
475 475
476 476 def file(self, f):
477 477 if f[0] == '/':
478 478 f = f[1:]
479 479 return filelog.filelog(self.sopener, f)
480 480
481 481 def changectx(self, changeid=None):
482 482 return context.changectx(self, changeid)
483 483
484 484 def workingctx(self):
485 485 return context.workingctx(self)
486 486
487 487 def parents(self, changeid=None):
488 488 '''
489 489 get list of changectxs for parents of changeid or working directory
490 490 '''
491 491 if changeid is None:
492 492 pl = self.dirstate.parents()
493 493 else:
494 494 n = self.changelog.lookup(changeid)
495 495 pl = self.changelog.parents(n)
496 496 if pl[1] == nullid:
497 497 return [self.changectx(pl[0])]
498 498 return [self.changectx(pl[0]), self.changectx(pl[1])]
499 499
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # the dirstate knows the repo-relative working directory
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # delegate path formatting to the dirstate
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open a file from the working directory
        return self.wopener(f, mode)

    def _link(self, f):
        # is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
516 516
517 517 def _filter(self, filter, filename, data):
518 518 if filter not in self.filterpats:
519 519 l = []
520 520 for pat, cmd in self.ui.configitems(filter):
521 521 mf = util.matcher(self.root, "", [pat], [], [])[1]
522 522 l.append((mf, cmd))
523 523 self.filterpats[filter] = l
524 524
525 525 for mf, cmd in self.filterpats[filter]:
526 526 if mf(filename):
527 527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
528 528 data = util.filter(data, cmd)
529 529 break
530 530
531 531 return data
532 532
533 533 def wread(self, filename):
534 534 if self._link(filename):
535 535 data = os.readlink(self.wjoin(filename))
536 536 else:
537 537 data = self.wopener(filename, 'r').read()
538 538 return self._filter("encode", filename, data)
539 539
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory.

        The 'decode' filters are applied first.  flags: 'l' -> create a
        symlink with data as the target; 'x' -> set the executable bit.
        """
        data = self._filter("decode", filename, data)
        if "l" in flags:
            f = self.wjoin(filename)
            try:
                # replace whatever is there (file or old link)
                os.unlink(f)
            except OSError:
                pass
            d = os.path.dirname(f)
            if not os.path.exists(d):
                os.makedirs(d)
            os.symlink(data, f)
        else:
            try:
                # a stale symlink must be removed before writing a file
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
560 560
    def wwritedata(self, filename, data):
        """Return data passed through the 'decode' filters for filename,
        without touching the working directory."""
        return self._filter("decode", filename, data)
563 563
    def transaction(self):
        """Return a new transaction, nesting in the running one if any."""
        tr = self.transhandle
        if tr != None and tr.running():
            # a transaction is already in flight: nest inside it
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # after the transaction closes, the journal files are renamed to
        # the undo files consumed by rollback()
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
583 583
584 584 def recover(self):
585 585 l = self.lock()
586 586 if os.path.exists(self.sjoin("journal")):
587 587 self.ui.status(_("rolling back interrupted transaction\n"))
588 588 transaction.rollback(self.sopener, self.sjoin("journal"))
589 589 self.reload()
590 590 return True
591 591 else:
592 592 self.ui.warn(_("no interrupted transaction available\n"))
593 593 return False
594 594
595 595 def rollback(self, wlock=None, lock=None):
596 596 if not wlock:
597 597 wlock = self.wlock()
598 598 if not lock:
599 599 lock = self.lock()
600 600 if os.path.exists(self.sjoin("undo")):
601 601 self.ui.status(_("rolling back last transaction\n"))
602 602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 604 self.reload()
605 605 self.wreload()
606 606 else:
607 607 self.ui.warn(_("no rollback information available\n"))
608 608
609 609 def wreload(self):
610 610 self.dirstate.reload()
611 611
612 612 def reload(self):
613 613 self.changelog.load()
614 614 self.manifest.load()
615 615 self.tagscache = None
616 616 self.nodetagscache = None
617 617
618 618 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
619 619 desc=None):
620 620 try:
621 621 l = lock.lock(lockname, 0, releasefn, desc=desc)
622 622 except lock.LockHeld, inst:
623 623 if not wait:
624 624 raise
625 625 self.ui.warn(_("waiting for lock on %s held by %r\n") %
626 626 (desc, inst.locker))
627 627 # default to 600 seconds timeout
628 628 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
629 629 releasefn, desc=desc)
630 630 if acquirefn:
631 631 acquirefn()
632 632 return l
633 633
634 634 def lock(self, wait=1):
635 635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
636 636 desc=_('repository %s') % self.origroot)
637 637
638 638 def wlock(self, wait=1):
639 639 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
640 640 self.wreload,
641 641 desc=_('working directory of %s') % self.origroot)
642 642
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node for fn.  fn is appended to changelist
        only when a new file revision is created; an unmodified file
        returns its existing parent node and leaves changelist alone.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # first parent is null so the copy metadata is consulted
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
702 702
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
        """Commit the given files directly against explicit parents.

        With p1 unset, the current dirstate parents are used.
        NOTE(review): the mutable default for extra appears safe because
        commit() copies it before use -- confirm before changing commit().
        """
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock, extra=extra)
708 708
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset.

        In the normal case (p1 is None) the commit is driven by the
        dirstate; otherwise this is a raw commit of `files` against the
        given parents.  Returns the new changeset node, or None when
        nothing changed or the commit message came back empty.
        """
        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()        # never mutate the caller's (default) dict

        if use_dirstate:
            if files:
                # classify the explicitly-listed files by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to verify the branch name is valid UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            # a branch-name change alone is enough to commit
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                # filecommit appends f to `changed` only on a content change
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                new_exec = is_exec(f)
                new_link = is_link(f)
                if not changed or changed[-1] != f:
                    # mention the file in the changelog if some flag changed,
                    # even if there was no content change.
                    old_exec = m1.execf(f)
                    old_link = m1.linkf(f)
                    if old_exec != new_exec or old_link != new_link:
                        changed.append(f)
                m1.set(f, new_exec, new_link)
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a vanished file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the editor template shown to the user
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        # keep the in-memory branch cache current
        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
857 866
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # the break right after makes the delete-during-
                        # iteration safe: no further next() on fdict
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but absent from the
            # manifest: report via badmatch or warn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
899 908
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean)
        Note: the mutable default for 'files' is safe here - it is only
        read, never mutated.
        """

        # compare file contents of fn between rev getnode(fn) and the
        # working copy; true means "differs"
        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        # manifest of 'node', restricted to the files selected by 'match'
        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # did we take the wlock ourselves (to write back clean-file state)?
        mywlock = False

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            clean.append(f)
                            # opportunistically record the file as clean in
                            # the dirstate so the next status is faster;
                            # wait=0: don't block if someone holds the lock
                            if not wlock and not mywlock:
                                mywlock = True
                                try:
                                    wlock = self.wlock(wait=0)
                                except lock.LockException:
                                    pass
                            if wlock:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

            if mywlock and wlock:
                wlock.release()
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # modified if flags differ, or the nodes differ and the
                    # content actually differs (an empty node means "must
                    # compare contents")
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1008 1017
1009 1018 def add(self, list, wlock=None):
1010 1019 if not wlock:
1011 1020 wlock = self.wlock()
1012 1021 for f in list:
1013 1022 p = self.wjoin(f)
1014 1023 islink = os.path.islink(p)
1015 1024 size = os.lstat(p).st_size
1016 1025 if size > 10000000:
1017 1026 self.ui.warn(_("%s: files over 10MB may cause memory and"
1018 1027 " performance problems\n"
1019 1028 "(use 'hg revert %s' to unadd the file)\n")
1020 1029 % (f, f))
1021 1030 if not islink and not os.path.exists(p):
1022 1031 self.ui.warn(_("%s does not exist!\n") % f)
1023 1032 elif not islink and not os.path.isfile(p):
1024 1033 self.ui.warn(_("%s not added: only files and symlinks "
1025 1034 "supported currently\n") % f)
1026 1035 elif self.dirstate.state(f) in 'an':
1027 1036 self.ui.warn(_("%s already tracked!\n") % f)
1028 1037 else:
1029 1038 self.dirstate.update([f], "a")
1030 1039
1031 1040 def forget(self, list, wlock=None):
1032 1041 if not wlock:
1033 1042 wlock = self.wlock()
1034 1043 for f in list:
1035 1044 if self.dirstate.state(f) not in 'ai':
1036 1045 self.ui.warn(_("%s not added!\n") % f)
1037 1046 else:
1038 1047 self.dirstate.forget([f])
1039 1048
1040 1049 def remove(self, list, unlink=False, wlock=None):
1041 1050 if unlink:
1042 1051 for f in list:
1043 1052 try:
1044 1053 util.unlink(self.wjoin(f))
1045 1054 except OSError, inst:
1046 1055 if inst.errno != errno.ENOENT:
1047 1056 raise
1048 1057 if not wlock:
1049 1058 wlock = self.wlock()
1050 1059 for f in list:
1051 1060 if unlink and os.path.exists(self.wjoin(f)):
1052 1061 self.ui.warn(_("%s still exists!\n") % f)
1053 1062 elif self.dirstate.state(f) == 'a':
1054 1063 self.dirstate.forget([f])
1055 1064 elif f not in self.dirstate:
1056 1065 self.ui.warn(_("%s not tracked!\n") % f)
1057 1066 else:
1058 1067 self.dirstate.update([f], "r")
1059 1068
1060 1069 def undelete(self, list, wlock=None):
1061 1070 p = self.dirstate.parents()[0]
1062 1071 mn = self.changelog.read(p)[0]
1063 1072 m = self.manifest.read(mn)
1064 1073 if not wlock:
1065 1074 wlock = self.wlock()
1066 1075 for f in list:
1067 1076 if self.dirstate.state(f) not in "r":
1068 1077 self.ui.warn("%s not removed!\n" % f)
1069 1078 else:
1070 1079 t = self.file(f).read(m[f])
1071 1080 self.wwrite(f, t, m.flags(f))
1072 1081 self.dirstate.update([f], "n")
1073 1082
1074 1083 def copy(self, source, dest, wlock=None):
1075 1084 p = self.wjoin(dest)
1076 1085 if not (os.path.exists(p) or os.path.islink(p)):
1077 1086 self.ui.warn(_("%s does not exist!\n") % dest)
1078 1087 elif not (os.path.isfile(p) or os.path.islink(p)):
1079 1088 self.ui.warn(_("copy failed: %s is not a file or a "
1080 1089 "symbolic link\n") % dest)
1081 1090 else:
1082 1091 if not wlock:
1083 1092 wlock = self.wlock()
1084 1093 if self.dirstate.state(dest) == '?':
1085 1094 self.dirstate.update([dest], "a")
1086 1095 self.dirstate.copy(source, dest)
1087 1096
1088 1097 def heads(self, start=None):
1089 1098 heads = self.changelog.heads(start)
1090 1099 # sort the output in rev descending order
1091 1100 heads = [(-self.changelog.rev(h), h) for h in heads]
1092 1101 heads.sort()
1093 1102 return [n for (r, n) in heads]
1094 1103
1095 1104 def branches(self, nodes):
1096 1105 if not nodes:
1097 1106 nodes = [self.changelog.tip()]
1098 1107 b = []
1099 1108 for n in nodes:
1100 1109 t = n
1101 1110 while 1:
1102 1111 p = self.changelog.parents(n)
1103 1112 if p[1] != nullid or p[0] == nullid:
1104 1113 b.append((t, n, p[0], p[1]))
1105 1114 break
1106 1115 n = p[0]
1107 1116 return b
1108 1117
1109 1118 def between(self, pairs):
1110 1119 r = []
1111 1120
1112 1121 for top, bottom in pairs:
1113 1122 n, l, i = top, [], 0
1114 1123 f = 1
1115 1124
1116 1125 while n != bottom:
1117 1126 p = self.changelog.parents(n)[0]
1118 1127 if i == f:
1119 1128 l.append(n)
1120 1129 f = f * 2
1121 1130 n = p
1122 1131 i += 1
1123 1132
1124 1133 r.append(l)
1125 1134
1126 1135 return r
1127 1136
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # empty local repo: everything the remote has is incoming
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req: nodes we have already asked the remote about
        req = dict.fromkeys(unknown)
        # number of wire round trips, reported at the end
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next batched request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # ask about parents in batches of 10 to bound request size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most one node: p is the earliest unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1268 1277
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start from every local node, then prune away what remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1316 1325
1317 1326 def pull(self, remote, heads=None, force=False, lock=None):
1318 1327 mylock = False
1319 1328 if not lock:
1320 1329 lock = self.lock()
1321 1330 mylock = True
1322 1331
1323 1332 try:
1324 1333 fetch = self.findincoming(remote, force=force)
1325 1334 if fetch == [nullid]:
1326 1335 self.ui.status(_("requesting all changes\n"))
1327 1336
1328 1337 if not fetch:
1329 1338 self.ui.status(_("no changes found\n"))
1330 1339 return 0
1331 1340
1332 1341 if heads is None:
1333 1342 cg = remote.changegroup(fetch, 'pull')
1334 1343 else:
1335 1344 if 'changegroupsubset' not in remote.capabilities:
1336 1345 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1337 1346 cg = remote.changegroupsubset(fetch, heads, 'pull')
1338 1347 return self.addchangegroup(cg, 'pull', remote.url())
1339 1348 finally:
1340 1349 if mylock:
1341 1350 lock.release()
1342 1351
1343 1352 def push(self, remote, force=False, revs=None):
1344 1353 # there are two ways to push to remote repo:
1345 1354 #
1346 1355 # addchangegroup assumes local user can lock remote
1347 1356 # repo (local filesystem, old ssh servers).
1348 1357 #
1349 1358 # unbundle assumes local user cannot lock remote repo (new ssh
1350 1359 # servers, http servers).
1351 1360
1352 1361 if remote.capable('unbundle'):
1353 1362 return self.push_unbundle(remote, force, revs)
1354 1363 return self.push_addchangegroup(remote, force, revs)
1355 1364
    def prepush(self, remote, force, revs):
        """Prepare a push: compute the changegroup and sanity-check heads.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, 1) when there is nothing to do or the push would
        create new remote heads and force was not given.
        """
        base = {}
        remote_heads = remote.heads()
        # inc: roots of changesets the remote has that we don't (unsynced)
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # remote repo is empty: cannot create "new" heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r stays
                            # a head after the push
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1411 1420
1412 1421 def push_addchangegroup(self, remote, force, revs):
1413 1422 lock = remote.lock()
1414 1423
1415 1424 ret = self.prepush(remote, force, revs)
1416 1425 if ret[0] is not None:
1417 1426 cg, remote_heads = ret
1418 1427 return remote.addchangegroup(cg, 'push', self.url())
1419 1428 return ret[1]
1420 1429
1421 1430 def push_unbundle(self, remote, force, revs):
1422 1431 # local repo finds heads on server, finds out what revs it
1423 1432 # must push. once revs transferred, if server finds it has
1424 1433 # different heads (someone else won commit/push race), server
1425 1434 # aborts.
1426 1435
1427 1436 ret = self.prepush(remote, force, revs)
1428 1437 if ret[0] is not None:
1429 1438 cg, remote_heads = ret
1430 1439 if force: remote_heads = ['force']
1431 1440 return remote.unbundle(cg, remote_heads, 'push')
1432 1441 return ret[1]
1433 1442
1434 1443 def changegroupinfo(self, nodes):
1435 1444 self.ui.note(_("%d changesets found\n") % len(nodes))
1436 1445 if self.ui.debugflag:
1437 1446 self.ui.debug(_("List of changesets:\n"))
1438 1447 for node in nodes:
1439 1448 self.ui.debug("%s\n" % hex(node))
1440 1449
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer wrapping the stream of chunks."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        # Structure: fname -> {filenode: owning changenode}
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                    else:
                        # Otherwise we need a full manifest.
                        m = mnfst.read(mnfstnode)
                        # For every file in we care about.
                        for f in changedfiles:
                            fnode = m.get(f, None)
                            # If it's in the manifest
                            if fnode is not None:
                                # See comments above.
                                clnode = msng_mnfst_set[mnfstnode]
                                ndset = msng_filenode_set.setdefault(f, {})
                                ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1712 1721
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the stream of chunks."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # revset: set of outgoing changelog revision numbers, used to decide
        # which manifest/file revisions to send (by their link revision)
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        # yield the nodes of 'revlog' whose linked changeset is outgoing,
        # in revision (storage) order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # record, via the group() callback, every file touched by an
        # outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node back to the changeset node that owns it
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1779 1788
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: stream delivering the changegroup chunks
        srctype, url: passed through to the pre/post hooks

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # link mapper for cl.addgroup: the current count is the
            # revision number the incoming changeset receives
            # (NOTE(review): assumes addgroup queries this before
            # appending -- confirm against changelog.addgroup)
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node to its (already-added) revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1  # last revision before the group
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1  # last revision after the group
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # each file group is preceded by a chunk carrying the file
            # name; an empty chunk terminates the stream
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # let pretxnchangegroup hooks veto before the transaction is
            # committed; cor+1 is the first newly-added changeset
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # post-commit notification hooks: one 'changegroup' for the
            # whole group, one 'incoming' per added changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1876 1885
1877 1886
1878 1887 def stream_in(self, remote):
1879 1888 fp = remote.stream_out()
1880 1889 l = fp.readline()
1881 1890 try:
1882 1891 resp = int(l)
1883 1892 except ValueError:
1884 1893 raise util.UnexpectedOutput(
1885 1894 _('Unexpected response from remote server:'), l)
1886 1895 if resp == 1:
1887 1896 raise util.Abort(_('operation forbidden by server'))
1888 1897 elif resp == 2:
1889 1898 raise util.Abort(_('locking the remote repository failed'))
1890 1899 elif resp != 0:
1891 1900 raise util.Abort(_('the server sent an unknown error code'))
1892 1901 self.ui.status(_('streaming all changes\n'))
1893 1902 l = fp.readline()
1894 1903 try:
1895 1904 total_files, total_bytes = map(int, l.split(' ', 1))
1896 1905 except ValueError, TypeError:
1897 1906 raise util.UnexpectedOutput(
1898 1907 _('Unexpected response from remote server:'), l)
1899 1908 self.ui.status(_('%d files to transfer, %s of data\n') %
1900 1909 (total_files, util.bytecount(total_bytes)))
1901 1910 start = time.time()
1902 1911 for i in xrange(total_files):
1903 1912 # XXX doesn't support '\n' or '\r' in filenames
1904 1913 l = fp.readline()
1905 1914 try:
1906 1915 name, size = l.split('\0', 1)
1907 1916 size = int(size)
1908 1917 except ValueError, TypeError:
1909 1918 raise util.UnexpectedOutput(
1910 1919 _('Unexpected response from remote server:'), l)
1911 1920 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1912 1921 ofp = self.sopener(name, 'w')
1913 1922 for chunk in util.filechunkiter(fp, limit=size):
1914 1923 ofp.write(chunk)
1915 1924 ofp.close()
1916 1925 elapsed = time.time() - start
1917 1926 if elapsed <= 0:
1918 1927 elapsed = 0.001
1919 1928 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1920 1929 (util.bytecount(total_bytes), elapsed,
1921 1930 util.bytecount(total_bytes / elapsed)))
1922 1931 self.reload()
1923 1932 return len(self.heads()) + 1
1924 1933
1925 1934 def clone(self, remote, heads=[], stream=False):
1926 1935 '''clone remote repository.
1927 1936
1928 1937 keyword arguments:
1929 1938 heads: list of revs to clone (forces use of pull)
1930 1939 stream: use streaming clone if possible'''
1931 1940
1932 1941 # now, all clients that can request uncompressed clones can
1933 1942 # read repo formats supported by all servers that can serve
1934 1943 # them.
1935 1944
1936 1945 # if revlog format changes, client will have to check version
1937 1946 # and format flags on "stream" capability, and use
1938 1947 # uncompressed only if compatible.
1939 1948
1940 1949 if stream and not heads and remote.capable('stream'):
1941 1950 return self.stream_in(remote)
1942 1951 return self.pull(remote, heads)
1943 1952
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the renames queued in *files*.

    Each entry is an (src, dest) pair; pairs are snapshotted as tuples up
    front so the returned closure keeps no reference to the caller's
    (possibly mutable) sequence.
    """
    renamefiles = [tuple(pair) for pair in files]
    def a():
        for source, destination in renamefiles:
            util.rename(source, destination)
    return a
1951 1960
def instance(ui, path, create):
    """Open (or create, if *create* is true) the local repo at *path*."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1954 1963
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,48 +1,50
1 1 #!/bin/sh -e
2 2
3 3 umask 027
4 4 mkdir test1
5 5 cd test1
6 6
7 7 hg init
8 8 touch a b
9 9 hg add a b
10 10 hg ci -m "added a b" -d "1000000 0"
11 11
12 12 cd ..
13 13 hg clone test1 test3
14 14 mkdir test2
15 15 cd test2
16 16
17 17 hg init
18 18 hg pull ../test1
19 19 hg co
20 20 chmod +x a
21 21 hg ci -m "chmod +x a" -d "1000000 0"
22 echo % the changelog should mention file a:
23 hg tip --template '#files#\n'
22 24
23 25 cd ../test1
24 26 echo 123 >>a
25 27 hg ci -m "a updated" -d "1000000 0"
26 28
27 29 hg pull ../test2
28 30 hg heads
29 31 hg history
30 32
31 33 hg -v merge
32 34
33 35 cd ../test3
34 36 echo 123 >>b
35 37 hg ci -m "b updated" -d "1000000 0"
36 38
37 39 hg pull ../test2
38 40 hg heads
39 41 hg history
40 42
41 43 hg -v merge
42 44
43 45 ls -l ../test[123]/a > foo
44 46 cut -b 1-10 < foo
45 47
46 48 hg debugindex .hg/store/data/a.i
47 49 hg debugindex ../test2/.hg/store/data/a.i
48 50 hg debugindex ../test1/.hg/store/data/a.i
@@ -1,102 +1,104
1 1 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 2 pulling from ../test1
3 3 requesting all changes
4 4 adding changesets
5 5 adding manifests
6 6 adding file changes
7 7 added 1 changesets with 2 changes to 2 files
8 8 (run 'hg update' to get a working copy)
9 9 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
10 % the changelog should mention file a:
11 a
10 12 pulling from ../test2
11 13 searching for changes
12 14 adding changesets
13 15 adding manifests
14 16 adding file changes
15 17 added 1 changesets with 1 changes to 1 files (+1 heads)
16 18 (run 'hg heads' to see heads, 'hg merge' to merge)
17 19 changeset: 2:b833d578451e
18 20 tag: tip
19 21 parent: 0:4536b1c2ca69
20 22 user: test
21 23 date: Mon Jan 12 13:46:40 1970 +0000
22 24 summary: chmod +x a
23 25
24 26 changeset: 1:a187cb361a5a
25 27 user: test
26 28 date: Mon Jan 12 13:46:40 1970 +0000
27 29 summary: a updated
28 30
29 31 changeset: 2:b833d578451e
30 32 tag: tip
31 33 parent: 0:4536b1c2ca69
32 34 user: test
33 35 date: Mon Jan 12 13:46:40 1970 +0000
34 36 summary: chmod +x a
35 37
36 38 changeset: 1:a187cb361a5a
37 39 user: test
38 40 date: Mon Jan 12 13:46:40 1970 +0000
39 41 summary: a updated
40 42
41 43 changeset: 0:4536b1c2ca69
42 44 user: test
43 45 date: Mon Jan 12 13:46:40 1970 +0000
44 46 summary: added a b
45 47
46 48 resolving manifests
47 49 merging a
48 50 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
49 51 (branch merge, don't forget to commit)
50 52 pulling from ../test2
51 53 searching for changes
52 54 adding changesets
53 55 adding manifests
54 56 adding file changes
55 57 added 1 changesets with 1 changes to 1 files (+1 heads)
56 58 (run 'hg heads' to see heads, 'hg merge' to merge)
57 59 changeset: 2:b833d578451e
58 60 tag: tip
59 61 parent: 0:4536b1c2ca69
60 62 user: test
61 63 date: Mon Jan 12 13:46:40 1970 +0000
62 64 summary: chmod +x a
63 65
64 66 changeset: 1:d54568174d8e
65 67 user: test
66 68 date: Mon Jan 12 13:46:40 1970 +0000
67 69 summary: b updated
68 70
69 71 changeset: 2:b833d578451e
70 72 tag: tip
71 73 parent: 0:4536b1c2ca69
72 74 user: test
73 75 date: Mon Jan 12 13:46:40 1970 +0000
74 76 summary: chmod +x a
75 77
76 78 changeset: 1:d54568174d8e
77 79 user: test
78 80 date: Mon Jan 12 13:46:40 1970 +0000
79 81 summary: b updated
80 82
81 83 changeset: 0:4536b1c2ca69
82 84 user: test
83 85 date: Mon Jan 12 13:46:40 1970 +0000
84 86 summary: added a b
85 87
86 88 resolving manifests
87 89 getting a
88 90 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 91 (branch merge, don't forget to commit)
90 92 -rwxr-x---
91 93 -rwxr-x---
92 94 -rwxr-x---
93 95 rev offset length base linkrev nodeid p1 p2
94 96 0 0 0 0 0 b80de5d13875 000000000000 000000000000
95 97 1 0 0 0 2 37c42bd6cc03 b80de5d13875 000000000000
96 98 rev offset length base linkrev nodeid p1 p2
97 99 0 0 0 0 0 b80de5d13875 000000000000 000000000000
98 100 1 0 0 0 1 37c42bd6cc03 b80de5d13875 000000000000
99 101 rev offset length base linkrev nodeid p1 p2
100 102 0 0 0 0 0 b80de5d13875 000000000000 000000000000
101 103 1 0 5 1 1 7fe919cc0336 b80de5d13875 000000000000
102 104 2 5 0 2 2 37c42bd6cc03 b80de5d13875 000000000000
General Comments 0
You need to be logged in to leave comments. Login now