commit: read branch with workingctx
Matt Mackall
r3440:0f1fd985 default
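This commit replaces commit()'s inline read of the .hg/branch file with a
call through the working-directory context. The before/after, taken from
the hunk below:

    # before: commit() read and error-handled the branch file itself
    try:
        branchname = self.opener("branch").read().rstrip()
    except IOError:
        branchname = ""

    # after: the working context owns that read
    branchname = self.workingctx().branch()

Routing the lookup through context.workingctx presumably gives every caller
that needs the working directory's branch the same code path and the same
fallback behaviour.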
@@ -1,1813 +1,1810 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ()
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.abspath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.wopener = util.opener(self.root)
51 51
52 52 try:
53 53 self.ui.readconfig(self.join("hgrc"), self.root)
54 54 except IOError:
55 55 pass
56 56
57 57 v = self.ui.configrevlog()
58 58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 60 fl = v.get('flags', None)
61 61 flags = 0
62 62 if fl != None:
63 63 for x in fl.split():
64 64 flags |= revlog.flagstr(x)
65 65 elif self.revlogv1:
66 66 flags = revlog.REVLOG_DEFAULT_FLAGS
67 67
68 68 v = self.revlogversion | flags
69 69 self.manifest = manifest.manifest(self.opener, v)
70 70 self.changelog = changelog.changelog(self.opener, v)
71 71
72 72 # the changelog might not have the inline index flag
73 73 # on. If the format of the changelog is the same as found in
74 74 # .hgrc, apply any flags found in the .hgrc as well.
75 75 # Otherwise, just use the version from the changelog.
76 76 v = self.changelog.version
77 77 if v == self.revlogversion:
78 78 v |= flags
79 79 self.revlogversion = v
80 80
81 81 self.tagscache = None
82 82 self.branchcache = None
83 83 self.nodetagscache = None
84 84 self.encodepats = None
85 85 self.decodepats = None
86 86 self.transhandle = None
87 87
88 88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 89
90 90 def url(self):
91 91 return 'file:' + self.root
92 92
93 93 def hook(self, name, throw=False, **args):
94 94 def callhook(hname, funcname):
95 95 '''call python hook. hook is callable object, looked up as
96 96 name in python module. if callable returns "true", hook
97 97 fails, else passes. if hook raises exception, treated as
98 98 hook failure. exception propagates if throw is "true".
99 99
100 100 reason for "true" meaning "hook failed" is so that
101 101 unmodified commands (e.g. mercurial.commands.update) can
102 102 be run as hooks without wrappers to convert return values.'''
103 103
104 104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 105 d = funcname.rfind('.')
106 106 if d == -1:
107 107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 108 % (hname, funcname))
109 109 modname = funcname[:d]
110 110 try:
111 111 obj = __import__(modname)
112 112 except ImportError:
113 113 try:
114 114 # extensions are loaded with hgext_ prefix
115 115 obj = __import__("hgext_%s" % modname)
116 116 except ImportError:
117 117 raise util.Abort(_('%s hook is invalid '
118 118 '(import of "%s" failed)') %
119 119 (hname, modname))
120 120 try:
121 121 for p in funcname.split('.')[1:]:
122 122 obj = getattr(obj, p)
123 123 except AttributeError, err:
124 124 raise util.Abort(_('%s hook is invalid '
125 125 '("%s" is not defined)') %
126 126 (hname, funcname))
127 127 if not callable(obj):
128 128 raise util.Abort(_('%s hook is invalid '
129 129 '("%s" is not callable)') %
130 130 (hname, funcname))
131 131 try:
132 132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 133 except (KeyboardInterrupt, util.SignalInterrupt):
134 134 raise
135 135 except Exception, exc:
136 136 if isinstance(exc, util.Abort):
137 137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 138 (hname, exc.args[0]))
139 139 else:
140 140 self.ui.warn(_('error: %s hook raised an exception: '
141 141 '%s\n') % (hname, exc))
142 142 if throw:
143 143 raise
144 144 self.ui.print_exc()
145 145 return True
146 146 if r:
147 147 if throw:
148 148 raise util.Abort(_('%s hook failed') % hname)
149 149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 150 return r
151 151
152 152 def runhook(name, cmd):
153 153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 155 r = util.system(cmd, environ=env, cwd=self.root)
156 156 if r:
157 157 desc, r = util.explain_exit(r)
158 158 if throw:
159 159 raise util.Abort(_('%s hook %s') % (name, desc))
160 160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 161 return r
162 162
163 163 r = False
164 164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 165 if hname.split(".", 1)[0] == name and cmd]
166 166 hooks.sort()
167 167 for hname, cmd in hooks:
168 168 if cmd.startswith('python:'):
169 169 r = callhook(hname, cmd[7:].strip()) or r
170 170 else:
171 171 r = runhook(hname, cmd) or r
172 172 return r
173 173
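# Annotation (not part of the original file): the dispatcher above acts on
# [hooks] entries from the hgrc. A hypothetical configuration it would run
# (mymod.notify is an invented name):
#
#   [hooks]
#   commit = echo committed $HG_NODE      # shell hook; args exported as HG_* env vars
#   commit.notify = python:mymod.notify   # python hook; dotted path after "python:"
#
# Both entries share the base name "commit", so a commit fires both, in
# sorted order of the full hook names.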
174 174 tag_disallowed = ':\r\n'
175 175
176 176 def tag(self, name, node, message, local, user, date):
177 177 '''tag a revision with a symbolic name.
178 178
179 179 if local is True, the tag is stored in a per-repository file.
180 180 otherwise, it is stored in the .hgtags file, and a new
181 181 changeset is committed with the change.
182 182
183 183 keyword arguments:
184 184
185 185 local: whether to store tag in non-version-controlled file
186 186 (default False)
187 187
188 188 message: commit message to use if committing
189 189
190 190 user: name of user to use if committing
191 191
192 192 date: date tuple to use if committing'''
193 193
194 194 for c in self.tag_disallowed:
195 195 if c in name:
196 196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 197
198 198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 199
200 200 if local:
201 201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 202 self.hook('tag', node=hex(node), tag=name, local=local)
203 203 return
204 204
205 205 for x in self.status()[:5]:
206 206 if '.hgtags' in x:
207 207 raise util.Abort(_('working copy of .hgtags is changed '
208 208 '(please commit .hgtags manually)'))
209 209
210 210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 211 if self.dirstate.state('.hgtags') == '?':
212 212 self.add(['.hgtags'])
213 213
214 214 self.commit(['.hgtags'], message, user, date)
215 215 self.hook('tag', node=hex(node), tag=name, local=local)
216 216
217 217 def tags(self):
218 218 '''return a mapping of tag to node'''
219 219 if not self.tagscache:
220 220 self.tagscache = {}
221 221
222 222 def parsetag(line, context):
223 223 if not line:
224 224 return
225 225 s = line.split(" ", 1)
226 226 if len(s) != 2:
227 227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 228 return
229 229 node, key = s
230 230 key = key.strip()
231 231 try:
232 232 bin_n = bin(node)
233 233 except TypeError:
234 234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 235 (context, node))
236 236 return
237 237 if bin_n not in self.changelog.nodemap:
238 238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 239 (context, key))
240 240 return
241 241 self.tagscache[key] = bin_n
242 242
243 243 # read the tags file from each head, ending with the tip,
244 244 # and add each tag found to the map, with "newer" ones
245 245 # taking precedence
246 246 heads = self.heads()
247 247 heads.reverse()
248 248 fl = self.file(".hgtags")
249 249 for node in heads:
250 250 change = self.changelog.read(node)
251 251 rev = self.changelog.rev(node)
252 252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 253 if fn is None: continue
254 254 count = 0
255 255 for l in fl.read(fn).splitlines():
256 256 count += 1
257 257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 258 (rev, short(node), count))
259 259 try:
260 260 f = self.opener("localtags")
261 261 count = 0
262 262 for l in f:
263 263 count += 1
264 264 parsetag(l, _("localtags, line %d") % count)
265 265 except IOError:
266 266 pass
267 267
268 268 self.tagscache['tip'] = self.changelog.tip()
269 269
270 270 return self.tagscache
271 271
272 272 def tagslist(self):
273 273 '''return a list of tags ordered by revision'''
274 274 l = []
275 275 for t, n in self.tags().items():
276 276 try:
277 277 r = self.changelog.rev(n)
278 278 except:
279 279 r = -2 # sort to the beginning of the list if unknown
280 280 l.append((r, t, n))
281 281 l.sort()
282 282 return [(t, n) for r, t, n in l]
283 283
284 284 def nodetags(self, node):
285 285 '''return the tags associated with a node'''
286 286 if not self.nodetagscache:
287 287 self.nodetagscache = {}
288 288 for t, n in self.tags().items():
289 289 self.nodetagscache.setdefault(n, []).append(t)
290 290 return self.nodetagscache.get(node, [])
291 291
292 292 def branchtags(self):
293 293 if self.branchcache != None:
294 294 return self.branchcache
295 295
296 296 self.branchcache = {} # avoid recursion in changectx
297 297
298 298 try:
299 299 f = self.opener("branches.cache")
300 300 last, lrev = f.readline().rstrip().split(" ", 1)
301 301 last, lrev = bin(last), int(lrev)
302 302 if self.changelog.node(lrev) == last: # sanity check
303 303 for l in f:
304 304 node, label = l.rstrip().split(" ", 1)
305 305 self.branchcache[label] = bin(node)
306 306 f.close()
307 307 except IOError:
308 308 last, lrev = nullid, -1
309 309 lrev = self.changelog.rev(last)
310 310
311 311 tip = self.changelog.count() - 1
312 312 if lrev != tip:
313 313 for r in xrange(lrev + 1, tip + 1):
314 314 c = self.changectx(r)
315 315 b = c.branch()
316 316 if b:
317 317 self.branchcache[b] = c.node()
318 318 self._writebranchcache()
319 319
320 320 return self.branchcache
321 321
322 322 def _writebranchcache(self):
323 323 f = self.opener("branches.cache", "w")
324 324 t = self.changelog.tip()
325 325 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
326 326 for label, node in self.branchcache.iteritems():
327 327 f.write("%s %s\n" % (hex(node), label))
328 328
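# Annotation (not part of the original file): the cache written above is a
# plain text file -- a header line naming the tip, then one line per branch:
#
#   <tip-hex> <tip-rev>
#   <node-hex> <label>
#   ...
#
# branchtags() checks the header against the changelog before trusting the
# entries, and rescans any revisions newer than <tip-rev> to refresh the map.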
329 329 def lookup(self, key):
330 330 if key == '.':
331 331 key = self.dirstate.parents()[0]
332 332 if key == nullid:
333 333 raise repo.RepoError(_("no revision checked out"))
334 334 if key in self.tags():
335 335 return self.tags()[key]
336 336 if key in self.branchtags():
337 337 return self.branchtags()[key]
338 338 try:
339 339 return self.changelog.lookup(key)
340 340 except:
341 341 raise repo.RepoError(_("unknown revision '%s'") % key)
342 342
343 343 def dev(self):
344 344 return os.lstat(self.path).st_dev
345 345
346 346 def local(self):
347 347 return True
348 348
349 349 def join(self, f):
350 350 return os.path.join(self.path, f)
351 351
352 352 def wjoin(self, f):
353 353 return os.path.join(self.root, f)
354 354
355 355 def file(self, f):
356 356 if f[0] == '/':
357 357 f = f[1:]
358 358 return filelog.filelog(self.opener, f, self.revlogversion)
359 359
360 360 def changectx(self, changeid=None):
361 361 return context.changectx(self, changeid)
362 362
363 363 def workingctx(self):
364 364 return context.workingctx(self)
365 365
366 366 def parents(self, changeid=None):
367 367 '''
368 368 get list of changectxs for parents of changeid or working directory
369 369 '''
370 370 if changeid is None:
371 371 pl = self.dirstate.parents()
372 372 else:
373 373 n = self.changelog.lookup(changeid)
374 374 pl = self.changelog.parents(n)
375 375 if pl[1] == nullid:
376 376 return [self.changectx(pl[0])]
377 377 return [self.changectx(pl[0]), self.changectx(pl[1])]
378 378
379 379 def filectx(self, path, changeid=None, fileid=None):
380 380 """changeid can be a changeset revision, node, or tag.
381 381 fileid can be a file revision or node."""
382 382 return context.filectx(self, path, changeid, fileid)
383 383
384 384 def getcwd(self):
385 385 return self.dirstate.getcwd()
386 386
387 387 def wfile(self, f, mode='r'):
388 388 return self.wopener(f, mode)
389 389
390 390 def wread(self, filename):
391 391 if self.encodepats == None:
392 392 l = []
393 393 for pat, cmd in self.ui.configitems("encode"):
394 394 mf = util.matcher(self.root, "", [pat], [], [])[1]
395 395 l.append((mf, cmd))
396 396 self.encodepats = l
397 397
398 398 data = self.wopener(filename, 'r').read()
399 399
400 400 for mf, cmd in self.encodepats:
401 401 if mf(filename):
402 402 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
403 403 data = util.filter(data, cmd)
404 404 break
405 405
406 406 return data
407 407
408 408 def wwrite(self, filename, data, fd=None):
409 409 if self.decodepats == None:
410 410 l = []
411 411 for pat, cmd in self.ui.configitems("decode"):
412 412 mf = util.matcher(self.root, "", [pat], [], [])[1]
413 413 l.append((mf, cmd))
414 414 self.decodepats = l
415 415
416 416 for mf, cmd in self.decodepats:
417 417 if mf(filename):
418 418 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
419 419 data = util.filter(data, cmd)
420 420 break
421 421
422 422 if fd:
423 423 return fd.write(data)
424 424 return self.wopener(filename, 'w').write(data)
425 425
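# Annotation (not part of the original file): wread()/wwrite() above pipe
# file data through the first matching [encode]/[decode] filter command
# from the hgrc; util.filter() feeds the data to the command's stdin and
# reads its stdout. A hypothetical pair (commands invented for
# illustration):
#
#   [encode]
#   **.txt = dos2unix    # applied when reading from the working dir
#   [decode]
#   **.txt = unix2dos    # applied when writing to the working dir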
426 426 def transaction(self):
427 427 tr = self.transhandle
428 428 if tr != None and tr.running():
429 429 return tr.nest()
430 430
431 431 # save dirstate for rollback
432 432 try:
433 433 ds = self.opener("dirstate").read()
434 434 except IOError:
435 435 ds = ""
436 436 self.opener("journal.dirstate", "w").write(ds)
437 437
438 438 tr = transaction.transaction(self.ui.warn, self.opener,
439 439 self.join("journal"),
440 440 aftertrans(self.path))
441 441 self.transhandle = tr
442 442 return tr
443 443
444 444 def recover(self):
445 445 l = self.lock()
446 446 if os.path.exists(self.join("journal")):
447 447 self.ui.status(_("rolling back interrupted transaction\n"))
448 448 transaction.rollback(self.opener, self.join("journal"))
449 449 self.reload()
450 450 return True
451 451 else:
452 452 self.ui.warn(_("no interrupted transaction available\n"))
453 453 return False
454 454
455 455 def rollback(self, wlock=None):
456 456 if not wlock:
457 457 wlock = self.wlock()
458 458 l = self.lock()
459 459 if os.path.exists(self.join("undo")):
460 460 self.ui.status(_("rolling back last transaction\n"))
461 461 transaction.rollback(self.opener, self.join("undo"))
462 462 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
463 463 self.reload()
464 464 self.wreload()
465 465 else:
466 466 self.ui.warn(_("no rollback information available\n"))
467 467
468 468 def wreload(self):
469 469 self.dirstate.read()
470 470
471 471 def reload(self):
472 472 self.changelog.load()
473 473 self.manifest.load()
474 474 self.tagscache = None
475 475 self.nodetagscache = None
476 476
477 477 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
478 478 desc=None):
479 479 try:
480 480 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
481 481 except lock.LockHeld, inst:
482 482 if not wait:
483 483 raise
484 484 self.ui.warn(_("waiting for lock on %s held by %s\n") %
485 485 (desc, inst.args[0]))
486 486 # default to 600 seconds timeout
487 487 l = lock.lock(self.join(lockname),
488 488 int(self.ui.config("ui", "timeout") or 600),
489 489 releasefn, desc=desc)
490 490 if acquirefn:
491 491 acquirefn()
492 492 return l
493 493
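# Annotation (not part of the original file): when a lock is already held,
# do_lock() above warns and then waits up to ui.timeout seconds (600 by
# default) for it to be released. A hypothetical hgrc override:
#
#   [ui]
#   timeout = 30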
494 494 def lock(self, wait=1):
495 495 return self.do_lock("lock", wait, acquirefn=self.reload,
496 496 desc=_('repository %s') % self.origroot)
497 497
498 498 def wlock(self, wait=1):
499 499 return self.do_lock("wlock", wait, self.dirstate.write,
500 500 self.wreload,
501 501 desc=_('working directory of %s') % self.origroot)
502 502
503 503 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
504 504 """
505 505 commit an individual file as part of a larger transaction
506 506 """
507 507
508 508 t = self.wread(fn)
509 509 fl = self.file(fn)
510 510 fp1 = manifest1.get(fn, nullid)
511 511 fp2 = manifest2.get(fn, nullid)
512 512
513 513 meta = {}
514 514 cp = self.dirstate.copied(fn)
515 515 if cp:
516 516 meta["copy"] = cp
517 517 if not manifest2: # not a branch merge
518 518 meta["copyrev"] = hex(manifest1.get(cp, nullid))
519 519 fp2 = nullid
520 520 elif fp2 != nullid: # copied on remote side
521 521 meta["copyrev"] = hex(manifest1.get(cp, nullid))
522 522 else: # copied on local side, reversed
523 523 meta["copyrev"] = hex(manifest2.get(cp))
524 524 fp2 = nullid
525 525 self.ui.debug(_(" %s: copy %s:%s\n") %
526 526 (fn, cp, meta["copyrev"]))
527 527 fp1 = nullid
528 528 elif fp2 != nullid:
529 529 # is one parent an ancestor of the other?
530 530 fpa = fl.ancestor(fp1, fp2)
531 531 if fpa == fp1:
532 532 fp1, fp2 = fp2, nullid
533 533 elif fpa == fp2:
534 534 fp2 = nullid
535 535
536 536 # is the file unmodified from the parent? report existing entry
537 537 if fp2 == nullid and not fl.cmp(fp1, t):
538 538 return fp1
539 539
540 540 changelist.append(fn)
541 541 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
542 542
543 543 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
544 544 orig_parent = self.dirstate.parents()[0] or nullid
545 545 p1 = p1 or self.dirstate.parents()[0] or nullid
546 546 p2 = p2 or self.dirstate.parents()[1] or nullid
547 547 c1 = self.changelog.read(p1)
548 548 c2 = self.changelog.read(p2)
549 549 m1 = self.manifest.read(c1[0]).copy()
550 550 m2 = self.manifest.read(c2[0])
551 551 changed = []
552 552 removed = []
553 553
554 554 if orig_parent == p1:
555 555 update_dirstate = 1
556 556 else:
557 557 update_dirstate = 0
558 558
559 559 if not wlock:
560 560 wlock = self.wlock()
561 561 l = self.lock()
562 562 tr = self.transaction()
563 563 linkrev = self.changelog.count()
564 564 for f in files:
565 565 try:
566 566 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
567 567 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
568 568 except IOError:
569 569 try:
570 570 del m1[f]
571 571 if update_dirstate:
572 572 self.dirstate.forget([f])
573 573 removed.append(f)
574 574 except:
575 575 # deleted from p2?
576 576 pass
577 577
578 578 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
579 579 user = user or self.ui.username()
580 580 n = self.changelog.add(mnode, changed + removed, text,
581 581 tr, p1, p2, user, date)
582 582 tr.close()
583 583 if update_dirstate:
584 584 self.dirstate.setparents(n, nullid)
585 585
586 586 def commit(self, files=None, text="", user=None, date=None,
587 587 match=util.always, force=False, lock=None, wlock=None,
588 588 force_editor=False):
589 589 commit = []
590 590 remove = []
591 591 changed = []
592 592
593 593 if files:
594 594 for f in files:
595 595 s = self.dirstate.state(f)
596 596 if s in 'nmai':
597 597 commit.append(f)
598 598 elif s == 'r':
599 599 remove.append(f)
600 600 else:
601 601 self.ui.warn(_("%s not tracked!\n") % f)
602 602 else:
603 603 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
604 604 commit = modified + added
605 605 remove = removed
606 606
607 607 p1, p2 = self.dirstate.parents()
608 608 c1 = self.changelog.read(p1)
609 609 c2 = self.changelog.read(p2)
610 610 m1 = self.manifest.read(c1[0]).copy()
611 611 m2 = self.manifest.read(c2[0])
612 612
613 try:
614 branchname = self.opener("branch").read().rstrip()
615 except IOError:
616 branchname = ""
613 branchname = self.workingctx().branch()
617 614 oldname = c1[5].get("branch", "")
618 615
619 616 if not commit and not remove and not force and p2 == nullid and \
620 617 branchname == oldname:
621 618 self.ui.status(_("nothing changed\n"))
622 619 return None
623 620
624 621 xp1 = hex(p1)
625 622 if p2 == nullid: xp2 = ''
626 623 else: xp2 = hex(p2)
627 624
628 625 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
629 626
630 627 if not wlock:
631 628 wlock = self.wlock()
632 629 if not lock:
633 630 lock = self.lock()
634 631 tr = self.transaction()
635 632
636 633 # check in files
637 634 new = {}
638 635 linkrev = self.changelog.count()
639 636 commit.sort()
640 637 for f in commit:
641 638 self.ui.note(f + "\n")
642 639 try:
643 640 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
644 641 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
645 642 except IOError:
646 643 self.ui.warn(_("trouble committing %s!\n") % f)
647 644 raise
648 645
649 646 # update manifest
650 647 m1.update(new)
651 648 for f in remove:
652 649 if f in m1:
653 650 del m1[f]
654 651 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
655 652
656 653 # add changeset
657 654 new = new.keys()
658 655 new.sort()
659 656
660 657 user = user or self.ui.username()
661 658 if not text or force_editor:
662 659 edittext = []
663 660 if text:
664 661 edittext.append(text)
665 662 edittext.append("")
666 663 if p2 != nullid:
667 664 edittext.append("HG: branch merge")
668 665 edittext.extend(["HG: changed %s" % f for f in changed])
669 666 edittext.extend(["HG: removed %s" % f for f in remove])
670 667 if not changed and not remove:
671 668 edittext.append("HG: no files changed")
672 669 edittext.append("")
673 670 # run editor in the repository root
674 671 olddir = os.getcwd()
675 672 os.chdir(self.root)
676 673 text = self.ui.edit("\n".join(edittext), user)
677 674 os.chdir(olddir)
678 675
679 676 lines = [line.rstrip() for line in text.rstrip().splitlines()]
680 677 while lines and not lines[0]:
681 678 del lines[0]
682 679 if not lines:
683 680 return None
684 681 text = '\n'.join(lines)
685 682 extra = {}
686 683 if branchname:
687 684 extra["branch"] = branchname
688 685 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
689 686 user, date, extra)
690 687 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
691 688 parent2=xp2)
692 689 tr.close()
693 690
694 691 self.dirstate.setparents(n)
695 692 self.dirstate.update(new, "n")
696 693 self.dirstate.forget(remove)
697 694
698 695 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
699 696 return n
700 697
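# Annotation (not part of the original file): the branch name obtained from
# workingctx() earlier in commit() is persisted as extra["branch"] on the
# new changeset; c1[5].get("branch", "") is the same field read back from
# the first parent for the "nothing changed" comparison.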
701 698 def walk(self, node=None, files=[], match=util.always, badmatch=None):
702 699 if node:
703 700 fdict = dict.fromkeys(files)
704 701 for fn in self.manifest.read(self.changelog.read(node)[0]):
705 702 for ffn in fdict:
706 703 # match if the file is the exact name or a directory
707 704 if ffn == fn or fn.startswith("%s/" % ffn):
708 705 del fdict[ffn]
709 706 break
710 707 if match(fn):
711 708 yield 'm', fn
712 709 for fn in fdict:
713 710 if badmatch and badmatch(fn):
714 711 if match(fn):
715 712 yield 'b', fn
716 713 else:
717 714 self.ui.warn(_('%s: No such file in rev %s\n') % (
718 715 util.pathto(self.getcwd(), fn), short(node)))
719 716 else:
720 717 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
721 718 yield src, fn
722 719
723 720 def status(self, node1=None, node2=None, files=[], match=util.always,
724 721 wlock=None, list_ignored=False, list_clean=False):
725 722 """return status of files between two nodes or node and working directory
726 723
727 724 If node1 is None, use the first dirstate parent instead.
728 725 If node2 is None, compare node1 with working directory.
729 726 """
730 727
731 728 def fcmp(fn, mf):
732 729 t1 = self.wread(fn)
733 730 return self.file(fn).cmp(mf.get(fn, nullid), t1)
734 731
735 732 def mfmatches(node):
736 733 change = self.changelog.read(node)
737 734 mf = self.manifest.read(change[0]).copy()
738 735 for fn in mf.keys():
739 736 if not match(fn):
740 737 del mf[fn]
741 738 return mf
742 739
743 740 modified, added, removed, deleted, unknown = [], [], [], [], []
744 741 ignored, clean = [], []
745 742
746 743 compareworking = False
747 744 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
748 745 compareworking = True
749 746
750 747 if not compareworking:
751 748 # read the manifest from node1 before the manifest from node2,
752 749 # so that we'll hit the manifest cache if we're going through
753 750 # all the revisions in parent->child order.
754 751 mf1 = mfmatches(node1)
755 752
756 753 # are we comparing the working directory?
757 754 if not node2:
758 755 if not wlock:
759 756 try:
760 757 wlock = self.wlock(wait=0)
761 758 except lock.LockException:
762 759 wlock = None
763 760 (lookup, modified, added, removed, deleted, unknown,
764 761 ignored, clean) = self.dirstate.status(files, match,
765 762 list_ignored, list_clean)
766 763
767 764 # are we comparing working dir against its parent?
768 765 if compareworking:
769 766 if lookup:
770 767 # do a full compare of any files that might have changed
771 768 mf2 = mfmatches(self.dirstate.parents()[0])
772 769 for f in lookup:
773 770 if fcmp(f, mf2):
774 771 modified.append(f)
775 772 else:
776 773 clean.append(f)
777 774 if wlock is not None:
778 775 self.dirstate.update([f], "n")
779 776 else:
780 777 # we are comparing working dir against non-parent
781 778 # generate a pseudo-manifest for the working dir
782 779 # XXX: create it in dirstate.py ?
783 780 mf2 = mfmatches(self.dirstate.parents()[0])
784 781 for f in lookup + modified + added:
785 782 mf2[f] = ""
786 783 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
787 784 for f in removed:
788 785 if f in mf2:
789 786 del mf2[f]
790 787 else:
791 788 # we are comparing two revisions
792 789 mf2 = mfmatches(node2)
793 790
794 791 if not compareworking:
795 792 # flush lists from dirstate before comparing manifests
796 793 modified, added, clean = [], [], []
797 794
798 795 # make sure to sort the files so we talk to the disk in a
799 796 # reasonable order
800 797 mf2keys = mf2.keys()
801 798 mf2keys.sort()
802 799 for fn in mf2keys:
803 800 if mf1.has_key(fn):
804 801 if mf1.flags(fn) != mf2.flags(fn) or \
805 802 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
806 803 modified.append(fn)
807 804 elif list_clean:
808 805 clean.append(fn)
809 806 del mf1[fn]
810 807 else:
811 808 added.append(fn)
812 809
813 810 removed = mf1.keys()
814 811
815 812 # sort and return results:
816 813 for l in modified, added, removed, deleted, unknown, ignored, clean:
817 814 l.sort()
818 815 return (modified, added, removed, deleted, unknown, ignored, clean)
819 816
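# Annotation (not part of the original file): status() always returns its
# seven lists in this order -- (modified, added, removed, deleted, unknown,
# ignored, clean) -- and ignored/clean are only fully populated when
# list_ignored/list_clean are requested.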
820 817 def add(self, list, wlock=None):
821 818 if not wlock:
822 819 wlock = self.wlock()
823 820 for f in list:
824 821 p = self.wjoin(f)
825 822 if not os.path.exists(p):
826 823 self.ui.warn(_("%s does not exist!\n") % f)
827 824 elif not os.path.isfile(p):
828 825 self.ui.warn(_("%s not added: only files supported currently\n")
829 826 % f)
830 827 elif self.dirstate.state(f) in 'an':
831 828 self.ui.warn(_("%s already tracked!\n") % f)
832 829 else:
833 830 self.dirstate.update([f], "a")
834 831
835 832 def forget(self, list, wlock=None):
836 833 if not wlock:
837 834 wlock = self.wlock()
838 835 for f in list:
839 836 if self.dirstate.state(f) not in 'ai':
840 837 self.ui.warn(_("%s not added!\n") % f)
841 838 else:
842 839 self.dirstate.forget([f])
843 840
844 841 def remove(self, list, unlink=False, wlock=None):
845 842 if unlink:
846 843 for f in list:
847 844 try:
848 845 util.unlink(self.wjoin(f))
849 846 except OSError, inst:
850 847 if inst.errno != errno.ENOENT:
851 848 raise
852 849 if not wlock:
853 850 wlock = self.wlock()
854 851 for f in list:
855 852 p = self.wjoin(f)
856 853 if os.path.exists(p):
857 854 self.ui.warn(_("%s still exists!\n") % f)
858 855 elif self.dirstate.state(f) == 'a':
859 856 self.dirstate.forget([f])
860 857 elif f not in self.dirstate:
861 858 self.ui.warn(_("%s not tracked!\n") % f)
862 859 else:
863 860 self.dirstate.update([f], "r")
864 861
865 862 def undelete(self, list, wlock=None):
866 863 p = self.dirstate.parents()[0]
867 864 mn = self.changelog.read(p)[0]
868 865 m = self.manifest.read(mn)
869 866 if not wlock:
870 867 wlock = self.wlock()
871 868 for f in list:
872 869 if self.dirstate.state(f) not in "r":
873 870 self.ui.warn(_("%s not removed!\n") % f)
874 871 else:
875 872 t = self.file(f).read(m[f])
876 873 self.wwrite(f, t)
877 874 util.set_exec(self.wjoin(f), m.execf(f))
878 875 self.dirstate.update([f], "n")
879 876
880 877 def copy(self, source, dest, wlock=None):
881 878 p = self.wjoin(dest)
882 879 if not os.path.exists(p):
883 880 self.ui.warn(_("%s does not exist!\n") % dest)
884 881 elif not os.path.isfile(p):
885 882 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
886 883 else:
887 884 if not wlock:
888 885 wlock = self.wlock()
889 886 if self.dirstate.state(dest) == '?':
890 887 self.dirstate.update([dest], "a")
891 888 self.dirstate.copy(source, dest)
892 889
893 890 def heads(self, start=None):
894 891 heads = self.changelog.heads(start)
895 892 # sort the output in rev descending order
896 893 heads = [(-self.changelog.rev(h), h) for h in heads]
897 894 heads.sort()
898 895 return [n for (r, n) in heads]
899 896
900 897 # branchlookup returns a dict giving a list of branches for
901 898 # each head. A branch is defined as the tag of a node or
902 899 # the branch of the node's parents. If a node has multiple
903 900 # branch tags, tags are eliminated if they are visible from other
904 901 # branch tags.
905 902 #
906 903 # So, for this graph: a->b->c->d->e
907 904 # \ /
908 905 # aa -----/
909 906 # a has tag 2.6.12
910 907 # d has tag 2.6.13
911 908 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
912 909 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
913 910 # from the list.
914 911 #
915 912 # It is possible that more than one head will have the same branch tag.
916 913 # callers need to check the result for multiple heads under the same
917 914 # branch tag if that is a problem for them (i.e. checkout of a specific
918 915 # branch).
919 916 #
920 917 # passing in a specific branch will limit the depth of the search
921 918 # through the parents. It won't limit the branches returned in the
922 919 # result though.
923 920 def branchlookup(self, heads=None, branch=None):
924 921 if not heads:
925 922 heads = self.heads()
926 923 headt = [ h for h in heads ]
927 924 chlog = self.changelog
928 925 branches = {}
929 926 merges = []
930 927 seenmerge = {}
931 928
932 929 # traverse the tree once for each head, recording in the branches
933 930 # dict which tags are visible from this head. The branches
934 931 # dict also records which tags are visible from each tag
935 932 # while we traverse.
936 933 while headt or merges:
937 934 if merges:
938 935 n, found = merges.pop()
939 936 visit = [n]
940 937 else:
941 938 h = headt.pop()
942 939 visit = [h]
943 940 found = [h]
944 941 seen = {}
945 942 while visit:
946 943 n = visit.pop()
947 944 if n in seen:
948 945 continue
949 946 pp = chlog.parents(n)
950 947 tags = self.nodetags(n)
951 948 if tags:
952 949 for x in tags:
953 950 if x == 'tip':
954 951 continue
955 952 for f in found:
956 953 branches.setdefault(f, {})[n] = 1
957 954 branches.setdefault(n, {})[n] = 1
958 955 break
959 956 if n not in found:
960 957 found.append(n)
961 958 if branch in tags:
962 959 continue
963 960 seen[n] = 1
964 961 if pp[1] != nullid and n not in seenmerge:
965 962 merges.append((pp[1], [x for x in found]))
966 963 seenmerge[n] = 1
967 964 if pp[0] != nullid:
968 965 visit.append(pp[0])
969 966 # traverse the branches dict, eliminating branch tags from each
970 967 # head that are visible from another branch tag for that head.
971 968 out = {}
972 969 viscache = {}
973 970 for h in heads:
974 971 def visible(node):
975 972 if node in viscache:
976 973 return viscache[node]
977 974 ret = {}
978 975 visit = [node]
979 976 while visit:
980 977 x = visit.pop()
981 978 if x in viscache:
982 979 ret.update(viscache[x])
983 980 elif x not in ret:
984 981 ret[x] = 1
985 982 if x in branches:
986 983 visit[len(visit):] = branches[x].keys()
987 984 viscache[node] = ret
988 985 return ret
989 986 if h not in branches:
990 987 continue
991 988 # O(n^2), but somewhat limited. This only searches the
992 989 # tags visible from a specific head, not all the tags in the
993 990 # whole repo.
994 991 for b in branches[h]:
995 992 vis = False
996 993 for bb in branches[h].keys():
997 994 if b != bb:
998 995 if b in visible(bb):
999 996 vis = True
1000 997 break
1001 998 if not vis:
1002 999 l = out.setdefault(h, [])
1003 1000 l[len(l):] = self.nodetags(b)
1004 1001 return out
1005 1002
1006 1003 def branches(self, nodes):
1007 1004 if not nodes:
1008 1005 nodes = [self.changelog.tip()]
1009 1006 b = []
1010 1007 for n in nodes:
1011 1008 t = n
1012 1009 while 1:
1013 1010 p = self.changelog.parents(n)
1014 1011 if p[1] != nullid or p[0] == nullid:
1015 1012 b.append((t, n, p[0], p[1]))
1016 1013 break
1017 1014 n = p[0]
1018 1015 return b
1019 1016
1020 1017 def between(self, pairs):
1021 1018 r = []
1022 1019
1023 1020 for top, bottom in pairs:
1024 1021 n, l, i = top, [], 0
1025 1022 f = 1
1026 1023
1027 1024 while n != bottom:
1028 1025 p = self.changelog.parents(n)[0]
1029 1026 if i == f:
1030 1027 l.append(n)
1031 1028 f = f * 2
1032 1029 n = p
1033 1030 i += 1
1034 1031
1035 1032 r.append(l)
1036 1033
1037 1034 return r
1038 1035
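# Annotation (not part of the original file): for each (top, bottom) pair,
# between() samples the first-parent chain at exponentially growing
# distances from top (i == f with f doubling: 1, 2, 4, 8, ...), which is
# what lets findincoming() below narrow a branch by binary search instead
# of walking every revision.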
1039 1036 def findincoming(self, remote, base=None, heads=None, force=False):
1040 1037 """Return list of roots of the subsets of missing nodes from remote
1041 1038
1042 1039 If base dict is specified, assume that these nodes and their parents
1043 1040 exist on the remote side and that no child of a node of base exists
1044 1041 in both remote and self.
1045 1042 Furthermore base will be updated to include the nodes that exist
1046 1043 in self and remote but whose children do not exist in self and remote.
1047 1044 If a list of heads is specified, return only nodes which are heads
1048 1045 or ancestors of these heads.
1049 1046
1050 1047 All the ancestors of base are in self and in remote.
1051 1048 All the descendants of the list returned are missing in self.
1052 1049 (and so we know that the rest of the nodes are missing in remote, see
1053 1050 outgoing)
1054 1051 """
1055 1052 m = self.changelog.nodemap
1056 1053 search = []
1057 1054 fetch = {}
1058 1055 seen = {}
1059 1056 seenbranch = {}
1060 1057 if base == None:
1061 1058 base = {}
1062 1059
1063 1060 if not heads:
1064 1061 heads = remote.heads()
1065 1062
1066 1063 if self.changelog.tip() == nullid:
1067 1064 base[nullid] = 1
1068 1065 if heads != [nullid]:
1069 1066 return [nullid]
1070 1067 return []
1071 1068
1072 1069 # assume we're closer to the tip than the root
1073 1070 # and start by examining the heads
1074 1071 self.ui.status(_("searching for changes\n"))
1075 1072
1076 1073 unknown = []
1077 1074 for h in heads:
1078 1075 if h not in m:
1079 1076 unknown.append(h)
1080 1077 else:
1081 1078 base[h] = 1
1082 1079
1083 1080 if not unknown:
1084 1081 return []
1085 1082
1086 1083 req = dict.fromkeys(unknown)
1087 1084 reqcnt = 0
1088 1085
1089 1086 # search through remote branches
1090 1087 # a 'branch' here is a linear segment of history, with four parts:
1091 1088 # head, root, first parent, second parent
1092 1089 # (a branch always has two parents (or none) by definition)
1093 1090 unknown = remote.branches(unknown)
1094 1091 while unknown:
1095 1092 r = []
1096 1093 while unknown:
1097 1094 n = unknown.pop(0)
1098 1095 if n[0] in seen:
1099 1096 continue
1100 1097
1101 1098 self.ui.debug(_("examining %s:%s\n")
1102 1099 % (short(n[0]), short(n[1])))
1103 1100 if n[0] == nullid: # found the end of the branch
1104 1101 pass
1105 1102 elif n in seenbranch:
1106 1103 self.ui.debug(_("branch already found\n"))
1107 1104 continue
1108 1105 elif n[1] and n[1] in m: # do we know the base?
1109 1106 self.ui.debug(_("found incomplete branch %s:%s\n")
1110 1107 % (short(n[0]), short(n[1])))
1111 1108 search.append(n) # schedule branch range for scanning
1112 1109 seenbranch[n] = 1
1113 1110 else:
1114 1111 if n[1] not in seen and n[1] not in fetch:
1115 1112 if n[2] in m and n[3] in m:
1116 1113 self.ui.debug(_("found new changeset %s\n") %
1117 1114 short(n[1]))
1118 1115 fetch[n[1]] = 1 # earliest unknown
1119 1116 for p in n[2:4]:
1120 1117 if p in m:
1121 1118 base[p] = 1 # latest known
1122 1119
1123 1120 for p in n[2:4]:
1124 1121 if p not in req and p not in m:
1125 1122 r.append(p)
1126 1123 req[p] = 1
1127 1124 seen[n[0]] = 1
1128 1125
1129 1126 if r:
1130 1127 reqcnt += 1
1131 1128 self.ui.debug(_("request %d: %s\n") %
1132 1129 (reqcnt, " ".join(map(short, r))))
1133 1130 for p in range(0, len(r), 10):
1134 1131 for b in remote.branches(r[p:p+10]):
1135 1132 self.ui.debug(_("received %s:%s\n") %
1136 1133 (short(b[0]), short(b[1])))
1137 1134 unknown.append(b)
1138 1135
1139 1136 # do binary search on the branches we found
1140 1137 while search:
1141 1138 n = search.pop(0)
1142 1139 reqcnt += 1
1143 1140 l = remote.between([(n[0], n[1])])[0]
1144 1141 l.append(n[1])
1145 1142 p = n[0]
1146 1143 f = 1
1147 1144 for i in l:
1148 1145 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1149 1146 if i in m:
1150 1147 if f <= 2:
1151 1148 self.ui.debug(_("found new branch changeset %s\n") %
1152 1149 short(p))
1153 1150 fetch[p] = 1
1154 1151 base[i] = 1
1155 1152 else:
1156 1153 self.ui.debug(_("narrowed branch search to %s:%s\n")
1157 1154 % (short(p), short(i)))
1158 1155 search.append((p, i))
1159 1156 break
1160 1157 p, f = i, f * 2
1161 1158
1162 1159 # sanity check our fetch list
1163 1160 for f in fetch.keys():
1164 1161 if f in m:
1165 1162 raise repo.RepoError(_("already have changeset ") + short(f))
1166 1163
1167 1164 if base.keys() == [nullid]:
1168 1165 if force:
1169 1166 self.ui.warn(_("warning: repository is unrelated\n"))
1170 1167 else:
1171 1168 raise util.Abort(_("repository is unrelated"))
1172 1169
1173 1170 self.ui.debug(_("found new changesets starting at ") +
1174 1171 " ".join([short(f) for f in fetch]) + "\n")
1175 1172
1176 1173 self.ui.debug(_("%d total queries\n") % reqcnt)
1177 1174
1178 1175 return fetch.keys()
1179 1176
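# Annotation (not part of the original file): discovery above uses three
# remote request types -- heads() once, branches() on batches of up to ten
# nodes to fetch linear-segment summaries, and between() to binary-search
# any segment whose base is already known locally; reqcnt counts the round
# trips reported as "%d total queries".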
1180 1177 def findoutgoing(self, remote, base=None, heads=None, force=False):
1181 1178 """Return list of nodes that are roots of subsets not in remote
1182 1179
1183 1180 If base dict is specified, assume that these nodes and their parents
1184 1181 exist on the remote side.
1185 1182 If a list of heads is specified, return only nodes which are heads
1186 1183 or ancestors of these heads, and return a second element which
1187 1184 contains all remote heads which get new children.
1188 1185 """
1189 1186 if base == None:
1190 1187 base = {}
1191 1188 self.findincoming(remote, base, heads, force=force)
1192 1189
1193 1190 self.ui.debug(_("common changesets up to ")
1194 1191 + " ".join(map(short, base.keys())) + "\n")
1195 1192
1196 1193 remain = dict.fromkeys(self.changelog.nodemap)
1197 1194
1198 1195 # prune everything remote has from the tree
1199 1196 del remain[nullid]
1200 1197 remove = base.keys()
1201 1198 while remove:
1202 1199 n = remove.pop(0)
1203 1200 if n in remain:
1204 1201 del remain[n]
1205 1202 for p in self.changelog.parents(n):
1206 1203 remove.append(p)
1207 1204
1208 1205 # find every node whose parents have been pruned
1209 1206 subset = []
1210 1207 # find every remote head that will get new children
1211 1208 updated_heads = {}
1212 1209 for n in remain:
1213 1210 p1, p2 = self.changelog.parents(n)
1214 1211 if p1 not in remain and p2 not in remain:
1215 1212 subset.append(n)
1216 1213 if heads:
1217 1214 if p1 in heads:
1218 1215 updated_heads[p1] = True
1219 1216 if p2 in heads:
1220 1217 updated_heads[p2] = True
1221 1218
1222 1219 # this is the set of all roots we have to push
1223 1220 if heads:
1224 1221 return subset, updated_heads.keys()
1225 1222 else:
1226 1223 return subset
1227 1224
1228 1225 def pull(self, remote, heads=None, force=False, lock=None):
1229 1226 mylock = False
1230 1227 if not lock:
1231 1228 lock = self.lock()
1232 1229 mylock = True
1233 1230
1234 1231 try:
1235 1232 fetch = self.findincoming(remote, force=force)
1236 1233 if fetch == [nullid]:
1237 1234 self.ui.status(_("requesting all changes\n"))
1238 1235
1239 1236 if not fetch:
1240 1237 self.ui.status(_("no changes found\n"))
1241 1238 return 0
1242 1239
1243 1240 if heads is None:
1244 1241 cg = remote.changegroup(fetch, 'pull')
1245 1242 else:
1246 1243 cg = remote.changegroupsubset(fetch, heads, 'pull')
1247 1244 return self.addchangegroup(cg, 'pull', remote.url())
1248 1245 finally:
1249 1246 if mylock:
1250 1247 lock.release()
1251 1248
1252 1249 def push(self, remote, force=False, revs=None):
1253 1250 # there are two ways to push to remote repo:
1254 1251 #
1255 1252 # addchangegroup assumes local user can lock remote
1256 1253 # repo (local filesystem, old ssh servers).
1257 1254 #
1258 1255 # unbundle assumes local user cannot lock remote repo (new ssh
1259 1256 # servers, http servers).
1260 1257
1261 1258 if remote.capable('unbundle'):
1262 1259 return self.push_unbundle(remote, force, revs)
1263 1260 return self.push_addchangegroup(remote, force, revs)
1264 1261
1265 1262 def prepush(self, remote, force, revs):
1266 1263 base = {}
1267 1264 remote_heads = remote.heads()
1268 1265 inc = self.findincoming(remote, base, remote_heads, force=force)
1269 1266 if not force and inc:
1270 1267 self.ui.warn(_("abort: unsynced remote changes!\n"))
1271 1268 self.ui.status(_("(did you forget to sync?"
1272 1269 " use push -f to force)\n"))
1273 1270 return None, 1
1274 1271
1275 1272 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1276 1273 if revs is not None:
1277 1274 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1278 1275 else:
1279 1276 bases, heads = update, self.changelog.heads()
1280 1277
1281 1278 if not bases:
1282 1279 self.ui.status(_("no changes found\n"))
1283 1280 return None, 1
1284 1281 elif not force:
1285 1282 # FIXME we don't properly detect creation of new heads
1286 1283 # in the push -r case, assume the user knows what he's doing
1287 1284 if not revs and len(remote_heads) < len(heads) \
1288 1285 and remote_heads != [nullid]:
1289 1286 self.ui.warn(_("abort: push creates new remote branches!\n"))
1290 1287 self.ui.status(_("(did you forget to merge?"
1291 1288 " use push -f to force)\n"))
1292 1289 return None, 1
1293 1290
1294 1291 if revs is None:
1295 1292 cg = self.changegroup(update, 'push')
1296 1293 else:
1297 1294 cg = self.changegroupsubset(update, revs, 'push')
1298 1295 return cg, remote_heads
1299 1296
1300 1297 def push_addchangegroup(self, remote, force, revs):
1301 1298 lock = remote.lock()
1302 1299
1303 1300 ret = self.prepush(remote, force, revs)
1304 1301 if ret[0] is not None:
1305 1302 cg, remote_heads = ret
1306 1303 return remote.addchangegroup(cg, 'push', self.url())
1307 1304 return ret[1]
1308 1305
1309 1306 def push_unbundle(self, remote, force, revs):
1310 1307 # local repo finds heads on server, finds out what revs it
1311 1308 # must push. once revs transferred, if server finds it has
1312 1309 # different heads (someone else won commit/push race), server
1313 1310 # aborts.
1314 1311
1315 1312 ret = self.prepush(remote, force, revs)
1316 1313 if ret[0] is not None:
1317 1314 cg, remote_heads = ret
1318 1315 if force: remote_heads = ['force']
1319 1316 return remote.unbundle(cg, remote_heads, 'push')
1320 1317 return ret[1]
1321 1318
1322 1319 def changegroupsubset(self, bases, heads, source):
1323 1320 """This function generates a changegroup consisting of all the nodes
1324 1321 that are descendants of any of the bases, and ancestors of any of
1325 1322 the heads.
1326 1323
1327 1324 It is fairly complex as determining which filenodes and which
1328 1325 manifest nodes need to be included for the changeset to be complete
1329 1326 is non-trivial.
1330 1327
1331 1328 Another wrinkle is doing the reverse, figuring out which changeset in
1332 1329 the changegroup a particular filenode or manifestnode belongs to."""
1333 1330
1334 1331 self.hook('preoutgoing', throw=True, source=source)
1335 1332
1336 1333 # Set up some initial variables
1337 1334 # Make it easy to refer to self.changelog
1338 1335 cl = self.changelog
1339 1336 # msng is short for missing - compute the list of changesets in this
1340 1337 # changegroup.
1341 1338 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1342 1339 # Some bases may turn out to be superfluous, and some heads may be
1343 1340 # too. nodesbetween will return the minimal set of bases and heads
1344 1341 # necessary to re-create the changegroup.
1345 1342
1346 1343 # Known heads are the list of heads that it is assumed the recipient
1347 1344 # of this changegroup will know about.
1348 1345 knownheads = {}
1349 1346 # We assume that all parents of bases are known heads.
1350 1347 for n in bases:
1351 1348 for p in cl.parents(n):
1352 1349 if p != nullid:
1353 1350 knownheads[p] = 1
1354 1351 knownheads = knownheads.keys()
1355 1352 if knownheads:
1356 1353 # Now that we know what heads are known, we can compute which
1357 1354 # changesets are known. The recipient must know about all
1358 1355 # changesets required to reach the known heads from the null
1359 1356 # changeset.
1360 1357 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1361 1358 junk = None
1362 1359 # Transform the list into an ersatz set.
1363 1360 has_cl_set = dict.fromkeys(has_cl_set)
1364 1361 else:
1365 1362 # If there were no known heads, the recipient cannot be assumed to
1366 1363 # know about any changesets.
1367 1364 has_cl_set = {}
1368 1365
1369 1366 # Make it easy to refer to self.manifest
1370 1367 mnfst = self.manifest
1371 1368 # We don't know which manifests are missing yet
1372 1369 msng_mnfst_set = {}
1373 1370 # Nor do we know which filenodes are missing.
1374 1371 msng_filenode_set = {}
1375 1372
1376 1373 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1377 1374 junk = None
1378 1375
1379 1376 # A changeset always belongs to itself, so the changenode lookup
1380 1377 # function for a changenode is identity.
1381 1378 def identity(x):
1382 1379 return x
1383 1380
1384 1381 # A function generating function. Sets up an environment for the
1385 1382 # inner function.
1386 1383 def cmp_by_rev_func(revlog):
1387 1384 # Compare two nodes by their revision number in the environment's
1388 1385 # revision history. Since the revision number both represents the
1389 1386 # most efficient order to read the nodes in, and represents a
1390 1387 # topological sorting of the nodes, this function is often useful.
1391 1388 def cmp_by_rev(a, b):
1392 1389 return cmp(revlog.rev(a), revlog.rev(b))
1393 1390 return cmp_by_rev
1394 1391
1395 1392 # If we determine that a particular file or manifest node must be a
1396 1393 # node that the recipient of the changegroup will already have, we can
1397 1394 # also assume the recipient will have all the parents. This function
1398 1395 # prunes them from the set of missing nodes.
1399 1396 def prune_parents(revlog, hasset, msngset):
1400 1397 haslst = hasset.keys()
1401 1398 haslst.sort(cmp_by_rev_func(revlog))
1402 1399 for node in haslst:
1403 1400 parentlst = [p for p in revlog.parents(node) if p != nullid]
1404 1401 while parentlst:
1405 1402 n = parentlst.pop()
1406 1403 if n not in hasset:
1407 1404 hasset[n] = 1
1408 1405 p = [p for p in revlog.parents(n) if p != nullid]
1409 1406 parentlst.extend(p)
1410 1407 for n in hasset:
1411 1408 msngset.pop(n, None)
1412 1409
1413 1410 # This is a function generating function used to set up an environment
1414 1411 # for the inner function to execute in.
1415 1412 def manifest_and_file_collector(changedfileset):
1416 1413 # This is an information gathering function that gathers
1417 1414 # information from each changeset node that goes out as part of
1418 1415 # the changegroup. The information gathered is a list of which
1419 1416 # manifest nodes are potentially required (the recipient may
1420 1417 # already have them) and total list of all files which were
1421 1418 # changed in any changeset in the changegroup.
1422 1419 #
1423 1420 # We also remember the first changenode we saw any manifest
1424 1421 # referenced by so we can later determine which changenode 'owns'
1425 1422 # the manifest.
1426 1423 def collect_manifests_and_files(clnode):
1427 1424 c = cl.read(clnode)
1428 1425 for f in c[3]:
1429 1426 # This is to make sure we only have one instance of each
1430 1427 # filename string for each filename.
1431 1428 changedfileset.setdefault(f, f)
1432 1429 msng_mnfst_set.setdefault(c[0], clnode)
1433 1430 return collect_manifests_and_files
1434 1431
1435 1432 # Figure out which manifest nodes (of the ones we think might be part
1436 1433 # of the changegroup) the recipient must know about and remove them
1437 1434 # from the changegroup.
1438 1435 def prune_manifests():
1439 1436 has_mnfst_set = {}
1440 1437 for n in msng_mnfst_set:
1441 1438 # If a 'missing' manifest thinks it belongs to a changenode
1442 1439 # the recipient is assumed to have, obviously the recipient
1443 1440 # must have that manifest.
1444 1441 linknode = cl.node(mnfst.linkrev(n))
1445 1442 if linknode in has_cl_set:
1446 1443 has_mnfst_set[n] = 1
1447 1444 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1448 1445
1449 1446 # Use the information collected in collect_manifests_and_files to say
1450 1447 # which changenode any manifestnode belongs to.
1451 1448 def lookup_manifest_link(mnfstnode):
1452 1449 return msng_mnfst_set[mnfstnode]
1453 1450
1454 1451 # A function generating function that sets up the initial environment
1455 1452 # the inner function.
1456 1453 def filenode_collector(changedfiles):
1457 1454 next_rev = [0]
1458 1455 # This gathers information from each manifestnode included in the
1459 1456 # changegroup about which filenodes the manifest node references
1460 1457 # so we can include those in the changegroup too.
1461 1458 #
1462 1459 # It also remembers which changenode each filenode belongs to. It
1463 1460 # does this by assuming a filenode belongs to the changenode
1464 1461 # the first manifest that references it belongs to.
1465 1462 def collect_msng_filenodes(mnfstnode):
1466 1463 r = mnfst.rev(mnfstnode)
1467 1464 if r == next_rev[0]:
1468 1465 # If the last rev we looked at was the one just previous,
1469 1466 # we only need to see a diff.
1470 1467 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1471 1468 # For each line in the delta
1472 1469 for dline in delta.splitlines():
1473 1470 # get the filename and filenode for that line
1474 1471 f, fnode = dline.split('\0')
1475 1472 fnode = bin(fnode[:40])
1476 1473 f = changedfiles.get(f, None)
1477 1474 # And if the file is in the list of files we care
1478 1475 # about.
1479 1476 if f is not None:
1480 1477 # Get the changenode this manifest belongs to
1481 1478 clnode = msng_mnfst_set[mnfstnode]
1482 1479 # Create the set of filenodes for the file if
1483 1480 # there isn't one already.
1484 1481 ndset = msng_filenode_set.setdefault(f, {})
1485 1482 # And set the filenode's changelog node to the
1486 1483 # manifest's if it hasn't been set already.
1487 1484 ndset.setdefault(fnode, clnode)
1488 1485 else:
1489 1486 # Otherwise we need a full manifest.
1490 1487 m = mnfst.read(mnfstnode)
1491 1488 # For every file we care about.
1492 1489 for f in changedfiles:
1493 1490 fnode = m.get(f, None)
1494 1491 # If it's in the manifest
1495 1492 if fnode is not None:
1496 1493 # See comments above.
1497 1494 clnode = msng_mnfst_set[mnfstnode]
1498 1495 ndset = msng_filenode_set.setdefault(f, {})
1499 1496 ndset.setdefault(fnode, clnode)
1500 1497 # Remember the revision we hope to see next.
1501 1498 next_rev[0] = r + 1
1502 1499 return collect_msng_filenodes
1503 1500
1504 1501 # We have a list of filenodes we think we need for a file, let's remove
1505 1502 # all those we know the recipient must have.
1506 1503 def prune_filenodes(f, filerevlog):
1507 1504 msngset = msng_filenode_set[f]
1508 1505 hasset = {}
1509 1506 # If a 'missing' filenode thinks it belongs to a changenode we
1510 1507 # assume the recipient must have, then the recipient must have
1511 1508 # that filenode.
1512 1509 for n in msngset:
1513 1510 clnode = cl.node(filerevlog.linkrev(n))
1514 1511 if clnode in has_cl_set:
1515 1512 hasset[n] = 1
1516 1513 prune_parents(filerevlog, hasset, msngset)
1517 1514
1518 1515 # A function generating function that sets up a context for the
1519 1516 # inner function.
1520 1517 def lookup_filenode_link_func(fname):
1521 1518 msngset = msng_filenode_set[fname]
1522 1519 # Lookup the changenode the filenode belongs to.
1523 1520 def lookup_filenode_link(fnode):
1524 1521 return msngset[fnode]
1525 1522 return lookup_filenode_link
1526 1523
1527 1524 # Now that we have all these utility functions to help out and
1528 1525 # logically divide up the task, generate the group.
1529 1526 def gengroup():
1530 1527 # The set of changed files starts empty.
1531 1528 changedfiles = {}
1532 1529 # Create a changenode group generator that will call our functions
1533 1530 # back to lookup the owning changenode and collect information.
1534 1531 group = cl.group(msng_cl_lst, identity,
1535 1532 manifest_and_file_collector(changedfiles))
1536 1533 for chnk in group:
1537 1534 yield chnk
1538 1535
1539 1536 # The list of manifests has been collected by the generator
1540 1537 # calling our functions back.
1541 1538 prune_manifests()
1542 1539 msng_mnfst_lst = msng_mnfst_set.keys()
1543 1540 # Sort the manifestnodes by revision number.
1544 1541 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1545 1542 # Create a generator for the manifestnodes that calls our lookup
1546 1543 # and data collection functions back.
1547 1544 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1548 1545 filenode_collector(changedfiles))
1549 1546 for chnk in group:
1550 1547 yield chnk
1551 1548
1552 1549 # These are no longer needed, dereference and toss the memory for
1553 1550 # them.
1554 1551 msng_mnfst_lst = None
1555 1552 msng_mnfst_set.clear()
1556 1553
1557 1554 changedfiles = changedfiles.keys()
1558 1555 changedfiles.sort()
1559 1556 # Go through all our files in order sorted by name.
1560 1557 for fname in changedfiles:
1561 1558 filerevlog = self.file(fname)
1562 1559 # Toss out the filenodes that the recipient isn't really
1563 1560 # missing.
1564 1561 if msng_filenode_set.has_key(fname):
1565 1562 prune_filenodes(fname, filerevlog)
1566 1563 msng_filenode_lst = msng_filenode_set[fname].keys()
1567 1564 else:
1568 1565 msng_filenode_lst = []
1569 1566 # If any filenodes are left, generate the group for them,
1570 1567 # otherwise don't bother.
1571 1568 if len(msng_filenode_lst) > 0:
1572 1569 yield changegroup.genchunk(fname)
1573 1570 # Sort the filenodes by their revision #
1574 1571 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1575 1572 # Create a group generator and only pass in a changenode
1576 1573 # lookup function as we need to collect no information
1577 1574 # from filenodes.
1578 1575 group = filerevlog.group(msng_filenode_lst,
1579 1576 lookup_filenode_link_func(fname))
1580 1577 for chnk in group:
1581 1578 yield chnk
1582 1579 if msng_filenode_set.has_key(fname):
1583 1580 # Don't need this anymore, toss it to free memory.
1584 1581 del msng_filenode_set[fname]
1585 1582 # Signal that no more groups are left.
1586 1583 yield changegroup.closechunk()
1587 1584
1588 1585 if msng_cl_lst:
1589 1586 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1590 1587
1591 1588 return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function, as we can assume
        that the recipient already has any changenode we aren't sending."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            return x

        def gennodelst(revlog):
            # Yield the nodes of this revlog whose linked changeset is in
            # the outgoing set.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # the list of files touched by this changeset is field 3
                # of the changelog entry
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = list(gennodelst(filerevlog))
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
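
    # Hedged usage sketch: a caller on the push path might drain the
    # returned file-like object in fixed-size reads (the transport object
    # and node name here are hypothetical):
    #
    #   cg = repo.changegroup([base_node], 'push')
    #   while True:
    #       data = cg.read(4096)
    #       if not data:
    #           break
    #       transport.send(data)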

    def addchangegroup(self, source, srctype, url):
        """Add a changegroup to the repo.

        Return the number of heads modified or added, plus one."""
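
        # Worked example of the return convention used below: with 1 head
        # before and 3 heads after, the method returns (3 - 1) + 1 = 3;
        # an unchanged head count returns 1, and an empty source returns 0.
        # Callers can therefore read 0 as "nothing added", 1 as "changesets
        # added but no new heads", and > 1 as "new heads were added".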

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0
        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # Write changelog data to temp files, so concurrent readers will
        # not see an inconsistent view.
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # No need to check for an empty manifest group here: if the
            # result of merging 1 and 2 is the same in 3 and 4, no new
            # manifest will be created and the manifest group will be
            # empty during the pull.
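            #
            # Illustrative case: if changesets 3 and 4 both merge the same
            # parents 1 and 2 and resolve to identical tree contents, they
            # share a single manifest node, so pulling one after the other
            # transfers a changelog entry but no new manifest nodes -- a
            # legitimately empty group.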
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()
        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
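
    # Hedged configuration sketch: the hooks fired above can be attached
    # from hgrc; shell hooks receive the keyword arguments as HG_*
    # environment variables.  Paths and commands below are illustrative:
    #
    #   [hooks]
    #   pretxnchangegroup = /path/to/validate-changesets   # can veto the txn
    #   incoming = echo "new changeset $HG_NODE from $HG_SOURCE"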

    def stream_in(self, remote):
        fp = remote.stream_out()
        resp = int(fp.readline())
        if resp != 0:
            raise util.Abort(_('operation forbidden by server'))
        self.ui.status(_('streaming all changes\n'))
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            size = int(size)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.opener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        # guard against division by zero on very fast transfers
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1
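
    # The stream_out wire format, reconstructed from the parsing code
    # above (a sketch, not a normative spec):
    #
    #   <status>\n                      0 = ok, anything else = forbidden
    #   <total_files> <total_bytes>\n
    #   then, total_files times:
    #     <name>\0<size>\n
    #     <size raw bytes of file data>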

    def clone(self, remote, heads=[], stream=False):
        '''clone a remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # As of now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve them.

        # If the revlog format changes, clients will have to check the
        # version and format flags on the "stream" capability, and use
        # uncompressed transfer only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
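
    # Hedged usage sketch ('remote' would be any repo.repository subclass
    # the caller obtained; the destination path is illustrative):
    #
    #   dest = localrepository(ui, '/tmp/mirror', create=1)
    #   dest.clone(remote, stream=True)   # falls back to pull() when the
    #                                     # server lacks the 'stream' cap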

# used to avoid circular references so destructors work
def aftertrans(base):
    p = base
    def a():
        util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
        util.rename(os.path.join(p, "journal.dirstate"),
                    os.path.join(p, "undo.dirstate"))
    return a
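
# Hedged usage note: localrepository.transaction() passes aftertrans(self.path)
# to the transaction as its completion callback, so a committed transaction
# renames journal -> undo (the state "hg rollback" later restores), roughly:
#
#   tr = transaction.transaction(self.ui.warn, self.opener,
#                                self.join("journal"),
#                                aftertrans(self.path))
#
# Routing the path through a plain closure, rather than a bound method,
# keeps the transaction object from holding a reference back to the
# repository, so __del__ can still run.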

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True