introduce store classes...
Adrian Buehlmann
r6840:80e51429 default
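This changeset replaces the inline store setup in localrepository.__init__ with a single store object: the encodefn/decodefn attributes, the spath computation, the createmode logic, the encodedopener wiring and the sjoin method (all removed in the diff below) move behind store.store(requirements, self.path), which exposes path, opener, join and createmode. The new mercurial/store.py is not shown on this page; what follows is a minimal sketch of the interface the diff relies on, reconstructed from how localrepo.py uses it. Only the store(requirements, path) entry point and those four attributes are confirmed by the diff; the class names, the simplified filename encoding and the opener stand-in are assumptions.

    import os

    # capture the process umask, as mercurial's util module does
    _umask = os.umask(0)
    os.umask(_umask)

    def _calcmode(path):
        # the mode computation removed from localrepo.__init__: reuse the
        # store directory's mode unless it matches the umask default anyway
        try:
            mode = os.stat(path).st_mode
            if (0777 & ~_umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None
        return mode

    def encodefilename(f):
        # heavily simplified stand-in for store.encodefilename; the real
        # encoding also escapes '_' and Windows-hostile characters
        return ''.join(c.isupper() and '_' + c.lower() or c for c in f)

    class _opener(object):
        # stand-in for util.opener: open files relative to a base directory
        def __init__(self, base):
            self.base = base
            self.createmode = None
        def __call__(self, name, mode='r'):
            return open(os.path.join(self.base, name), mode)

    class basicstore(object):
        # plain layout: revlogs live directly under .hg
        def __init__(self, path):
            self.path = path                  # becomes repo.spath
            self.createmode = _calcmode(path)
            op = _opener(self.path)
            op.createmode = self.createmode
            self.opener = op                  # becomes repo.sopener

        def join(self, f):                    # becomes repo.sjoin
            return os.path.join(self.path, f)

    class encodedstore(basicstore):
        # 'store' requirement layout: filenames are encoded on disk; a
        # full version would also wrap the opener to encode names, as the
        # removed store.encodedopener(sopener, self.encodefn) call did
        def join(self, f):
            return os.path.join(self.path, encodefilename(f))

    def store(requirements, path):
        if 'store' in requirements:
            return encodedstore(os.path.join(path, 'store'))
        return basicstore(path)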
@@ -1,2076 +1,2074
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15
16 16 class localrepository(repo.repository):
17 17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 18 supported = ('revlogv1', 'store')
19 19
20 20 def __init__(self, parentui, path=None, create=0):
21 21 repo.repository.__init__(self)
22 22 self.root = os.path.realpath(path)
23 23 self.path = os.path.join(self.root, ".hg")
24 24 self.origroot = path
25 25 self.opener = util.opener(self.path)
26 26 self.wopener = util.opener(self.root)
27 27
28 28 if not os.path.isdir(self.path):
29 29 if create:
30 30 if not os.path.exists(path):
31 31 os.mkdir(path)
32 32 os.mkdir(self.path)
33 33 requirements = ["revlogv1"]
34 34 if parentui.configbool('format', 'usestore', True):
35 35 os.mkdir(os.path.join(self.path, "store"))
36 36 requirements.append("store")
37 37 # create an invalid changelog
38 38 self.opener("00changelog.i", "a").write(
39 39 '\0\0\0\2' # represents revlogv2
40 40 ' dummy changelog to prevent using the old repo layout'
41 41 )
42 42 reqfile = self.opener("requires", "w")
43 43 for r in requirements:
44 44 reqfile.write("%s\n" % r)
45 45 reqfile.close()
46 46 else:
47 47 raise repo.RepoError(_("repository %s not found") % path)
48 48 elif create:
49 49 raise repo.RepoError(_("repository %s already exists") % path)
50 50 else:
51 51 # find requirements
52 52 try:
53 53 requirements = self.opener("requires").read().splitlines()
54 54 except IOError, inst:
55 55 if inst.errno != errno.ENOENT:
56 56 raise
57 57 requirements = []
58 58 # check them
59 59 for r in requirements:
60 60 if r not in self.supported:
61 61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62 62
63 # setup store
64 if "store" in requirements:
65 self.encodefn = store.encodefilename
66 self.decodefn = store.decodefilename
67 self.spath = os.path.join(self.path, "store")
68 else:
69 self.encodefn = lambda x: x
70 self.decodefn = lambda x: x
71 self.spath = self.path
63 self.store = store.store(requirements, self.path)
72 64
73 try:
74 # files in .hg/ will be created using this mode
75 mode = os.stat(self.spath).st_mode
76 # avoid some useless chmods
77 if (0777 & ~util._umask) == (0777 & mode):
78 mode = None
79 except OSError:
80 mode = None
81
82 self._createmode = mode
83 self.opener.createmode = mode
84 sopener = util.opener(self.spath)
85 sopener.createmode = mode
86 self.sopener = store.encodedopener(sopener, self.encodefn)
65 self.spath = self.store.path
66 self.sopener = self.store.opener
67 self.sjoin = self.store.join
68 self._createmode = self.store.createmode
69 self.opener.createmode = self.store.createmode
87 70
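# How the removed attributes map onto the new store object in the rest
# of this file:
#   self.spath                    -> self.store.path
#   sopener / store.encodedopener -> self.store.opener
#   mode computation              -> self.store.createmode
#   self.encodefn / self.decodefn -> handled inside the store object
#   self.sjoin (a method, removed further down) -> the bound self.store.join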
88 71 self.ui = ui.ui(parentui=parentui)
89 72 try:
90 73 self.ui.readconfig(self.join("hgrc"), self.root)
91 74 extensions.loadall(self.ui)
92 75 except IOError:
93 76 pass
94 77
95 78 self.tagscache = None
96 79 self._tagstypecache = None
97 80 self.branchcache = None
98 81 self._ubranchcache = None # UTF-8 version of branchcache
99 82 self._branchcachetip = None
100 83 self.nodetagscache = None
101 84 self.filterpats = {}
102 85 self._datafilters = {}
103 86 self._transref = self._lockref = self._wlockref = None
104 87
105 88 def __getattr__(self, name):
106 89 if name == 'changelog':
107 90 self.changelog = changelog.changelog(self.sopener)
108 91 self.sopener.defversion = self.changelog.version
109 92 return self.changelog
110 93 if name == 'manifest':
111 94 self.changelog
112 95 self.manifest = manifest.manifest(self.sopener)
113 96 return self.manifest
114 97 if name == 'dirstate':
115 98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 99 return self.dirstate
117 100 else:
118 101 raise AttributeError, name
119 102
120 103 def __getitem__(self, changeid):
121 104 if changeid == None:
122 105 return context.workingctx(self)
123 106 return context.changectx(self, changeid)
124 107
125 108 def __nonzero__(self):
126 109 return True
127 110
128 111 def __len__(self):
129 112 return len(self.changelog)
130 113
131 114 def __iter__(self):
132 115 for i in xrange(len(self)):
133 116 yield i
134 117
135 118 def url(self):
136 119 return 'file:' + self.root
137 120
138 121 def hook(self, name, throw=False, **args):
139 122 return hook.hook(self.ui, self, name, throw, **args)
140 123
141 124 tag_disallowed = ':\r\n'
142 125
143 126 def _tag(self, names, node, message, local, user, date, parent=None,
144 127 extra={}):
145 128 use_dirstate = parent is None
146 129
147 130 if isinstance(names, str):
148 131 allchars = names
149 132 names = (names,)
150 133 else:
151 134 allchars = ''.join(names)
152 135 for c in self.tag_disallowed:
153 136 if c in allchars:
154 137 raise util.Abort(_('%r cannot be used in a tag name') % c)
155 138
156 139 for name in names:
157 140 self.hook('pretag', throw=True, node=hex(node), tag=name,
158 141 local=local)
159 142
160 143 def writetags(fp, names, munge, prevtags):
161 144 fp.seek(0, 2)
162 145 if prevtags and prevtags[-1] != '\n':
163 146 fp.write('\n')
164 147 for name in names:
165 148 m = munge and munge(name) or name
166 149 if self._tagstypecache and name in self._tagstypecache:
167 150 old = self.tagscache.get(name, nullid)
168 151 fp.write('%s %s\n' % (hex(old), m))
169 152 fp.write('%s %s\n' % (hex(node), m))
170 153 fp.close()
171 154
172 155 prevtags = ''
173 156 if local:
174 157 try:
175 158 fp = self.opener('localtags', 'r+')
176 159 except IOError, err:
177 160 fp = self.opener('localtags', 'a')
178 161 else:
179 162 prevtags = fp.read()
180 163
181 164 # local tags are stored in the current charset
182 165 writetags(fp, names, None, prevtags)
183 166 for name in names:
184 167 self.hook('tag', node=hex(node), tag=name, local=local)
185 168 return
186 169
187 170 if use_dirstate:
188 171 try:
189 172 fp = self.wfile('.hgtags', 'rb+')
190 173 except IOError, err:
191 174 fp = self.wfile('.hgtags', 'ab')
192 175 else:
193 176 prevtags = fp.read()
194 177 else:
195 178 try:
196 179 prevtags = self.filectx('.hgtags', parent).data()
197 180 except revlog.LookupError:
198 181 pass
199 182 fp = self.wfile('.hgtags', 'wb')
200 183 if prevtags:
201 184 fp.write(prevtags)
202 185
203 186 # committed tags are stored in UTF-8
204 187 writetags(fp, names, util.fromlocal, prevtags)
205 188
206 189 if use_dirstate and '.hgtags' not in self.dirstate:
207 190 self.add(['.hgtags'])
208 191
209 192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
210 193 extra=extra)
211 194
212 195 for name in names:
213 196 self.hook('tag', node=hex(node), tag=name, local=local)
214 197
215 198 return tagnode
216 199
217 200 def tag(self, names, node, message, local, user, date):
218 201 '''tag a revision with one or more symbolic names.
219 202
220 203 names is a list of strings or, when adding a single tag, names may be a
221 204 string.
222 205
223 206 if local is True, the tags are stored in a per-repository file.
224 207 otherwise, they are stored in the .hgtags file, and a new
225 208 changeset is committed with the change.
226 209
227 210 keyword arguments:
228 211
229 212 local: whether to store tags in non-version-controlled file
230 213 (default False)
231 214
232 215 message: commit message to use if committing
233 216
234 217 user: name of user to use if committing
235 218
236 219 date: date tuple to use if committing'''
237 220
238 221 for x in self.status()[:5]:
239 222 if '.hgtags' in x:
240 223 raise util.Abort(_('working copy of .hgtags is changed '
241 224 '(please commit .hgtags manually)'))
242 225
243 226 self._tag(names, node, message, local, user, date)
244 227
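# A hedged usage sketch for tag() above (not part of this changeset);
# 'repo' is a localrepository, the name/user/message are made up:
#
#   node = repo.changelog.tip()
#   repo.tag('v1.0', node, 'Added tag v1.0 for changeset %s' % short(node),
#            False, 'Example User <user@example.com>', None)
#
# With local=False this commits a new changeset updating .hgtags; with
# local=True it only appends to .hg/localtags.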
245 228 def tags(self):
246 229 '''return a mapping of tag to node'''
247 230 if self.tagscache:
248 231 return self.tagscache
249 232
250 233 globaltags = {}
251 234 tagtypes = {}
252 235
253 236 def readtags(lines, fn, tagtype):
254 237 filetags = {}
255 238 count = 0
256 239
257 240 def warn(msg):
258 241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
259 242
260 243 for l in lines:
261 244 count += 1
262 245 if not l:
263 246 continue
264 247 s = l.split(" ", 1)
265 248 if len(s) != 2:
266 249 warn(_("cannot parse entry"))
267 250 continue
268 251 node, key = s
269 252 key = util.tolocal(key.strip()) # stored in UTF-8
270 253 try:
271 254 bin_n = bin(node)
272 255 except TypeError:
273 256 warn(_("node '%s' is not well formed") % node)
274 257 continue
275 258 if bin_n not in self.changelog.nodemap:
276 259 warn(_("tag '%s' refers to unknown node") % key)
277 260 continue
278 261
279 262 h = []
280 263 if key in filetags:
281 264 n, h = filetags[key]
282 265 h.append(n)
283 266 filetags[key] = (bin_n, h)
284 267
285 268 for k, nh in filetags.items():
286 269 if k not in globaltags:
287 270 globaltags[k] = nh
288 271 tagtypes[k] = tagtype
289 272 continue
290 273
291 274 # we prefer the global tag if:
292 275 # it supersedes us OR
293 276 # mutual supersedes and it has a higher rank
294 277 # otherwise we win because we're tip-most
295 278 an, ah = nh
296 279 bn, bh = globaltags[k]
297 280 if (bn != an and an in bh and
298 281 (bn not in ah or len(bh) > len(ah))):
299 282 an = bn
300 283 ah.extend([n for n in bh if n not in ah])
301 284 globaltags[k] = an, ah
302 285 tagtypes[k] = tagtype
303 286
304 287 # read the tags file from each head, ending with the tip
305 288 f = None
306 289 for rev, node, fnode in self._hgtagsnodes():
307 290 f = (f and f.filectx(fnode) or
308 291 self.filectx('.hgtags', fileid=fnode))
309 292 readtags(f.data().splitlines(), f, "global")
310 293
311 294 try:
312 295 data = util.fromlocal(self.opener("localtags").read())
313 296 # localtags are stored in the local character set
314 297 # while the internal tag table is stored in UTF-8
315 298 readtags(data.splitlines(), "localtags", "local")
316 299 except IOError:
317 300 pass
318 301
319 302 self.tagscache = {}
320 303 self._tagstypecache = {}
321 304 for k,nh in globaltags.items():
322 305 n = nh[0]
323 306 if n != nullid:
324 307 self.tagscache[k] = n
325 308 self._tagstypecache[k] = tagtypes[k]
326 309 self.tagscache['tip'] = self.changelog.tip()
327 310 return self.tagscache
328 311
329 312 def tagtype(self, tagname):
330 313 '''
331 314 return the type of the given tag. result can be:
332 315
333 316 'local' : a local tag
334 317 'global' : a global tag
335 318 None : tag does not exist
336 319 '''
337 320
338 321 self.tags()
339 322
340 323 return self._tagstypecache.get(tagname)
341 324
342 325 def _hgtagsnodes(self):
343 326 heads = self.heads()
344 327 heads.reverse()
345 328 last = {}
346 329 ret = []
347 330 for node in heads:
348 331 c = self[node]
349 332 rev = c.rev()
350 333 try:
351 334 fnode = c.filenode('.hgtags')
352 335 except revlog.LookupError:
353 336 continue
354 337 ret.append((rev, node, fnode))
355 338 if fnode in last:
356 339 ret[last[fnode]] = None
357 340 last[fnode] = len(ret) - 1
358 341 return [item for item in ret if item]
359 342
360 343 def tagslist(self):
361 344 '''return a list of tags ordered by revision'''
362 345 l = []
363 346 for t, n in self.tags().items():
364 347 try:
365 348 r = self.changelog.rev(n)
366 349 except:
367 350 r = -2 # sort to the beginning of the list if unknown
368 351 l.append((r, t, n))
369 352 return [(t, n) for r, t, n in util.sort(l)]
370 353
371 354 def nodetags(self, node):
372 355 '''return the tags associated with a node'''
373 356 if not self.nodetagscache:
374 357 self.nodetagscache = {}
375 358 for t, n in self.tags().items():
376 359 self.nodetagscache.setdefault(n, []).append(t)
377 360 return self.nodetagscache.get(node, [])
378 361
379 362 def _branchtags(self, partial, lrev):
380 363 tiprev = len(self) - 1
381 364 if lrev != tiprev:
382 365 self._updatebranchcache(partial, lrev+1, tiprev+1)
383 366 self._writebranchcache(partial, self.changelog.tip(), tiprev)
384 367
385 368 return partial
386 369
387 370 def branchtags(self):
388 371 tip = self.changelog.tip()
389 372 if self.branchcache is not None and self._branchcachetip == tip:
390 373 return self.branchcache
391 374
392 375 oldtip = self._branchcachetip
393 376 self._branchcachetip = tip
394 377 if self.branchcache is None:
395 378 self.branchcache = {} # avoid recursion in changectx
396 379 else:
397 380 self.branchcache.clear() # keep using the same dict
398 381 if oldtip is None or oldtip not in self.changelog.nodemap:
399 382 partial, last, lrev = self._readbranchcache()
400 383 else:
401 384 lrev = self.changelog.rev(oldtip)
402 385 partial = self._ubranchcache
403 386
404 387 self._branchtags(partial, lrev)
405 388
406 389 # the branch cache is stored on disk as UTF-8, but in the local
407 390 # charset internally
408 391 for k, v in partial.items():
409 392 self.branchcache[util.tolocal(k)] = v
410 393 self._ubranchcache = partial
411 394 return self.branchcache
412 395
413 396 def _readbranchcache(self):
414 397 partial = {}
415 398 try:
416 399 f = self.opener("branch.cache")
417 400 lines = f.read().split('\n')
418 401 f.close()
419 402 except (IOError, OSError):
420 403 return {}, nullid, nullrev
421 404
422 405 try:
423 406 last, lrev = lines.pop(0).split(" ", 1)
424 407 last, lrev = bin(last), int(lrev)
425 408 if lrev >= len(self) or self[lrev].node() != last:
426 409 # invalidate the cache
427 410 raise ValueError('invalidating branch cache (tip differs)')
428 411 for l in lines:
429 412 if not l: continue
430 413 node, label = l.split(" ", 1)
431 414 partial[label.strip()] = bin(node)
432 415 except (KeyboardInterrupt, util.SignalInterrupt):
433 416 raise
434 417 except Exception, inst:
435 418 if self.ui.debugflag:
436 419 self.ui.warn(str(inst), '\n')
437 420 partial, last, lrev = {}, nullid, nullrev
438 421 return partial, last, lrev
439 422
440 423 def _writebranchcache(self, branches, tip, tiprev):
441 424 try:
442 425 f = self.opener("branch.cache", "w", atomictemp=True)
443 426 f.write("%s %s\n" % (hex(tip), tiprev))
444 427 for label, node in branches.iteritems():
445 428 f.write("%s %s\n" % (hex(node), label))
446 429 f.rename()
447 430 except (IOError, OSError):
448 431 pass
449 432
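# On-disk layout of the branch.cache file written above (one line of tip
# information, then one "node label" pair per branch):
#
#   <40-digit hex tip node> <tip rev>
#   <40-digit hex head node> default
#   <40-digit hex head node> stable
#
# _readbranchcache() above discards the whole cache whenever the recorded
# tip no longer matches the repository.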
450 433 def _updatebranchcache(self, partial, start, end):
451 434 for r in xrange(start, end):
452 435 c = self[r]
453 436 b = c.branch()
454 437 partial[b] = c.node()
455 438
456 439 def lookup(self, key):
457 440 if key == '.':
458 441 return self.dirstate.parents()[0]
459 442 elif key == 'null':
460 443 return nullid
461 444 n = self.changelog._match(key)
462 445 if n:
463 446 return n
464 447 if key in self.tags():
465 448 return self.tags()[key]
466 449 if key in self.branchtags():
467 450 return self.branchtags()[key]
468 451 n = self.changelog._partialmatch(key)
469 452 if n:
470 453 return n
471 454 try:
472 455 if len(key) == 20:
473 456 key = hex(key)
474 457 except:
475 458 pass
476 459 raise repo.RepoError(_("unknown revision '%s'") % key)
477 460
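# Resolution order implemented by lookup() above: '.' (first dirstate
# parent), 'null', exact changelog match (rev number or full hex), tag
# names, branch names, then unambiguous node prefixes. A hedged sketch:
#
#   repo.lookup('.')           # working directory's first parent
#   repo.lookup('default')     # branch name, via branchtags()
#   repo.lookup('80e51429')    # unique hex prefix, via _partialmatch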
478 461 def local(self):
479 462 return True
480 463
481 464 def join(self, f):
482 465 return os.path.join(self.path, f)
483 466
484 def sjoin(self, f):
485 f = self.encodefn(f)
486 return os.path.join(self.spath, f)
487
488 467 def wjoin(self, f):
489 468 return os.path.join(self.root, f)
490 469
491 470 def rjoin(self, f):
492 471 return os.path.join(self.root, util.pconvert(f))
493 472
494 473 def file(self, f):
495 474 if f[0] == '/':
496 475 f = f[1:]
497 476 return filelog.filelog(self.sopener, f)
498 477
499 478 def changectx(self, changeid):
500 479 return self[changeid]
501 480
502 481 def parents(self, changeid=None):
503 482 '''get list of changectxs for parents of changeid'''
504 483 return self[changeid].parents()
505 484
506 485 def filectx(self, path, changeid=None, fileid=None):
507 486 """changeid can be a changeset revision, node, or tag.
508 487 fileid can be a file revision or node."""
509 488 return context.filectx(self, path, changeid, fileid)
510 489
511 490 def getcwd(self):
512 491 return self.dirstate.getcwd()
513 492
514 493 def pathto(self, f, cwd=None):
515 494 return self.dirstate.pathto(f, cwd)
516 495
517 496 def wfile(self, f, mode='r'):
518 497 return self.wopener(f, mode)
519 498
520 499 def _link(self, f):
521 500 return os.path.islink(self.wjoin(f))
522 501
523 502 def _filter(self, filter, filename, data):
524 503 if filter not in self.filterpats:
525 504 l = []
526 505 for pat, cmd in self.ui.configitems(filter):
527 506 mf = util.matcher(self.root, "", [pat], [], [])[1]
528 507 fn = None
529 508 params = cmd
530 509 for name, filterfn in self._datafilters.iteritems():
531 510 if cmd.startswith(name):
532 511 fn = filterfn
533 512 params = cmd[len(name):].lstrip()
534 513 break
535 514 if not fn:
536 515 fn = lambda s, c, **kwargs: util.filter(s, c)
537 516 # Wrap old filters not supporting keyword arguments
538 517 if not inspect.getargspec(fn)[2]:
539 518 oldfn = fn
540 519 fn = lambda s, c, **kwargs: oldfn(s, c)
541 520 l.append((mf, fn, params))
542 521 self.filterpats[filter] = l
543 522
544 523 for mf, fn, cmd in self.filterpats[filter]:
545 524 if mf(filename):
546 525 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
547 526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
548 527 break
549 528
550 529 return data
551 530
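# The (pattern, command) pairs iterated in _filter() come from hgrc
# sections named after the filter ("encode"/"decode"). A hedged example
# of configuration it would pick up (the command itself is made up):
#
#   [encode]
#   *.txt = tr -d '\r'
#
# Unless the command starts with a registered data filter name (see
# adddatafilter below), it is run as a shell pipe via util.filter.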
552 531 def adddatafilter(self, name, filter):
553 532 self._datafilters[name] = filter
554 533
555 534 def wread(self, filename):
556 535 if self._link(filename):
557 536 data = os.readlink(self.wjoin(filename))
558 537 else:
559 538 data = self.wopener(filename, 'r').read()
560 539 return self._filter("encode", filename, data)
561 540
562 541 def wwrite(self, filename, data, flags):
563 542 data = self._filter("decode", filename, data)
564 543 try:
565 544 os.unlink(self.wjoin(filename))
566 545 except OSError:
567 546 pass
568 547 self.wopener(filename, 'w').write(data)
569 548 util.set_flags(self.wjoin(filename), flags)
570 549
571 550 def wwritedata(self, filename, data):
572 551 return self._filter("decode", filename, data)
573 552
574 553 def transaction(self):
575 554 if self._transref and self._transref():
576 555 return self._transref().nest()
577 556
578 557 # abort here if the journal already exists
579 558 if os.path.exists(self.sjoin("journal")):
580 559 raise repo.RepoError(_("journal already exists - run hg recover"))
581 560
582 561 # save dirstate for rollback
583 562 try:
584 563 ds = self.opener("dirstate").read()
585 564 except IOError:
586 565 ds = ""
587 566 self.opener("journal.dirstate", "w").write(ds)
588 567 self.opener("journal.branch", "w").write(self.dirstate.branch())
589 568
590 569 renames = [(self.sjoin("journal"), self.sjoin("undo")),
591 570 (self.join("journal.dirstate"), self.join("undo.dirstate")),
592 571 (self.join("journal.branch"), self.join("undo.branch"))]
593 572 tr = transaction.transaction(self.ui.warn, self.sopener,
594 573 self.sjoin("journal"),
595 574 aftertrans(renames),
596 575 self._createmode)
597 576 self._transref = weakref.ref(tr)
598 577 return tr
599 578
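# Journal/undo lifecycle set up above: while the transaction is open,
# writes are logged to sjoin("journal") plus journal.dirstate and
# journal.branch; aftertrans() renames all three to their undo.*
# counterparts when the transaction closes. rollback() below replays the
# undo.* files, while a leftover "journal" file is what recover() fixes.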
600 579 def recover(self):
601 580 l = self.lock()
602 581 try:
603 582 if os.path.exists(self.sjoin("journal")):
604 583 self.ui.status(_("rolling back interrupted transaction\n"))
605 584 transaction.rollback(self.sopener, self.sjoin("journal"))
606 585 self.invalidate()
607 586 return True
608 587 else:
609 588 self.ui.warn(_("no interrupted transaction available\n"))
610 589 return False
611 590 finally:
612 591 del l
613 592
614 593 def rollback(self):
615 594 wlock = lock = None
616 595 try:
617 596 wlock = self.wlock()
618 597 lock = self.lock()
619 598 if os.path.exists(self.sjoin("undo")):
620 599 self.ui.status(_("rolling back last transaction\n"))
621 600 transaction.rollback(self.sopener, self.sjoin("undo"))
622 601 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
623 602 try:
624 603 branch = self.opener("undo.branch").read()
625 604 self.dirstate.setbranch(branch)
626 605 except IOError:
627 606 self.ui.warn(_("Named branch could not be reset, "
628 607 "current branch still is: %s\n")
629 608 % util.tolocal(self.dirstate.branch()))
630 609 self.invalidate()
631 610 self.dirstate.invalidate()
632 611 else:
633 612 self.ui.warn(_("no rollback information available\n"))
634 613 finally:
635 614 del lock, wlock
636 615
637 616 def invalidate(self):
638 617 for a in "changelog manifest".split():
639 618 if a in self.__dict__:
640 619 delattr(self, a)
641 620 self.tagscache = None
642 621 self._tagstypecache = None
643 622 self.nodetagscache = None
644 623 self.branchcache = None
645 624 self._ubranchcache = None
646 625 self._branchcachetip = None
647 626
648 627 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
649 628 try:
650 629 l = lock.lock(lockname, 0, releasefn, desc=desc)
651 630 except lock.LockHeld, inst:
652 631 if not wait:
653 632 raise
654 633 self.ui.warn(_("waiting for lock on %s held by %r\n") %
655 634 (desc, inst.locker))
656 635 # default to 600 seconds timeout
657 636 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
658 637 releasefn, desc=desc)
659 638 if acquirefn:
660 639 acquirefn()
661 640 return l
662 641
663 642 def lock(self, wait=True):
664 643 if self._lockref and self._lockref():
665 644 return self._lockref()
666 645
667 646 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
668 647 _('repository %s') % self.origroot)
669 648 self._lockref = weakref.ref(l)
670 649 return l
671 650
672 651 def wlock(self, wait=True):
673 652 if self._wlockref and self._wlockref():
674 653 return self._wlockref()
675 654
676 655 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 656 self.dirstate.invalidate, _('working directory of %s') %
678 657 self.origroot)
679 658 self._wlockref = weakref.ref(l)
680 659 return l
681 660
682 661 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 662 """
684 663 commit an individual file as part of a larger transaction
685 664 """
686 665
687 666 fn = fctx.path()
688 667 t = fctx.data()
689 668 fl = self.file(fn)
690 669 fp1 = manifest1.get(fn, nullid)
691 670 fp2 = manifest2.get(fn, nullid)
692 671
693 672 meta = {}
694 673 cp = fctx.renamed()
695 674 if cp and cp[0] != fn:
696 675 cp = cp[0]
697 676 # Mark the new revision of this file as a copy of another
698 677 # file. This copy data will effectively act as a parent
699 678 # of this new revision. If this is a merge, the first
700 679 # parent will be the nullid (meaning "look up the copy data")
701 680 # and the second one will be the other parent. For example:
702 681 #
703 682 # 0 --- 1 --- 3 rev1 changes file foo
704 683 # \ / rev2 renames foo to bar and changes it
705 684 # \- 2 -/ rev3 should have bar with all changes and
706 685 # should record that bar descends from
707 686 # bar in rev2 and foo in rev1
708 687 #
709 688 # this allows this merge to succeed:
710 689 #
711 690 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
712 691 # \ / merging rev3 and rev4 should use bar@rev2
713 692 # \- 2 --- 4 as the merge base
714 693 #
715 694 meta["copy"] = cp
716 695 if not manifest2: # not a branch merge
717 696 meta["copyrev"] = hex(manifest1[cp])
718 697 fp2 = nullid
719 698 elif fp2 != nullid: # copied on remote side
720 699 meta["copyrev"] = hex(manifest1[cp])
721 700 elif fp1 != nullid: # copied on local side, reversed
722 701 meta["copyrev"] = hex(manifest2[cp])
723 702 fp2 = fp1
724 703 elif cp in manifest2: # directory rename on local side
725 704 meta["copyrev"] = hex(manifest2[cp])
726 705 else: # directory rename on remote side
727 706 meta["copyrev"] = hex(manifest1[cp])
728 707 self.ui.debug(_(" %s: copy %s:%s\n") %
729 708 (fn, cp, meta["copyrev"]))
730 709 fp1 = nullid
731 710 elif fp2 != nullid:
732 711 # is one parent an ancestor of the other?
733 712 fpa = fl.ancestor(fp1, fp2)
734 713 if fpa == fp1:
735 714 fp1, fp2 = fp2, nullid
736 715 elif fpa == fp2:
737 716 fp2 = nullid
738 717
739 718 # is the file unmodified from the parent? report existing entry
740 719 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
741 720 return fp1
742 721
743 722 changelist.append(fn)
744 723 return fl.add(t, meta, tr, linkrev, fp1, fp2)
745 724
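# For a plain rename of foo to bar (no merge), the metadata recorded by
# filecommit() above would look like (filenode hex made up):
#
#   meta == {'copy': 'foo', 'copyrev': 'a3c6...40 hex digits...'}
#
# with fp2 forced to nullid, so the copy source acts as the effective
# parent, exactly as the comment block above describes.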
746 725 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
747 726 if p1 is None:
748 727 p1, p2 = self.dirstate.parents()
749 728 return self.commit(files=files, text=text, user=user, date=date,
750 729 p1=p1, p2=p2, extra=extra, empty_ok=True)
751 730
752 731 def commit(self, files=None, text="", user=None, date=None,
753 732 match=None, force=False, force_editor=False,
754 733 p1=None, p2=None, extra={}, empty_ok=False):
755 734 wlock = lock = None
756 735 if files:
757 736 files = util.unique(files)
758 737 try:
759 738 wlock = self.wlock()
760 739 lock = self.lock()
761 740 use_dirstate = (p1 is None) # not rawcommit
762 741
763 742 if use_dirstate:
764 743 p1, p2 = self.dirstate.parents()
765 744 update_dirstate = True
766 745
767 746 if (not force and p2 != nullid and
768 747 (match and (match.files() or match.anypats()))):
769 748 raise util.Abort(_('cannot partially commit a merge '
770 749 '(do not specify files or patterns)'))
771 750
772 751 if files:
773 752 modified, removed = [], []
774 753 for f in files:
775 754 s = self.dirstate[f]
776 755 if s in 'nma':
777 756 modified.append(f)
778 757 elif s == 'r':
779 758 removed.append(f)
780 759 else:
781 760 self.ui.warn(_("%s not tracked!\n") % f)
782 761 changes = [modified, [], removed, [], []]
783 762 else:
784 763 changes = self.status(match=match)
785 764 else:
786 765 p1, p2 = p1, p2 or nullid
787 766 update_dirstate = (self.dirstate.parents()[0] == p1)
788 767 changes = [files, [], [], [], []]
789 768
790 769 wctx = context.workingctx(self, (p1, p2), text, user, date,
791 770 extra, changes)
792 771 return self._commitctx(wctx, force, force_editor, empty_ok,
793 772 use_dirstate, update_dirstate)
794 773 finally:
795 774 del lock, wlock
796 775
797 776 def commitctx(self, ctx):
798 777 wlock = lock = None
799 778 try:
800 779 wlock = self.wlock()
801 780 lock = self.lock()
802 781 return self._commitctx(ctx, force=True, force_editor=False,
803 782 empty_ok=True, use_dirstate=False,
804 783 update_dirstate=False)
805 784 finally:
806 785 del lock, wlock
807 786
808 787 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
809 788 use_dirstate=True, update_dirstate=True):
810 789 tr = None
811 790 valid = 0 # don't save the dirstate if this isn't set
812 791 try:
813 792 commit = util.sort(wctx.modified() + wctx.added())
814 793 remove = wctx.removed()
815 794 extra = wctx.extra().copy()
816 795 branchname = extra['branch']
817 796 user = wctx.user()
818 797 text = wctx.description()
819 798
820 799 p1, p2 = [p.node() for p in wctx.parents()]
821 800 c1 = self.changelog.read(p1)
822 801 c2 = self.changelog.read(p2)
823 802 m1 = self.manifest.read(c1[0]).copy()
824 803 m2 = self.manifest.read(c2[0])
825 804
826 805 if use_dirstate:
827 806 oldname = c1[5].get("branch") # stored in UTF-8
828 807 if (not commit and not remove and not force and p2 == nullid
829 808 and branchname == oldname):
830 809 self.ui.status(_("nothing changed\n"))
831 810 return None
832 811
833 812 xp1 = hex(p1)
834 813 if p2 == nullid: xp2 = ''
835 814 else: xp2 = hex(p2)
836 815
837 816 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
838 817
839 818 tr = self.transaction()
840 819 trp = weakref.proxy(tr)
841 820
842 821 # check in files
843 822 new = {}
844 823 changed = []
845 824 linkrev = len(self)
846 825 for f in commit:
847 826 self.ui.note(f + "\n")
848 827 try:
849 828 fctx = wctx.filectx(f)
850 829 newflags = fctx.flags()
851 830 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
852 831 if ((not changed or changed[-1] != f) and
853 832 m2.get(f) != new[f]):
854 833 # mention the file in the changelog if some
855 834 # flag changed, even if there was no content
856 835 # change.
857 836 if m1.flags(f) != newflags:
858 837 changed.append(f)
859 838 m1.set(f, newflags)
860 839 if use_dirstate:
861 840 self.dirstate.normal(f)
862 841
863 842 except (OSError, IOError):
864 843 if use_dirstate:
865 844 self.ui.warn(_("trouble committing %s!\n") % f)
866 845 raise
867 846 else:
868 847 remove.append(f)
869 848
870 849 # update manifest
871 850 m1.update(new)
872 851 removed = []
873 852
874 853 for f in util.sort(remove):
875 854 if f in m1:
876 855 del m1[f]
877 856 removed.append(f)
878 857 elif f in m2:
879 858 removed.append(f)
880 859 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
881 860 (new, removed))
882 861
883 862 # add changeset
884 863 if (not empty_ok and not text) or force_editor:
885 864 edittext = []
886 865 if text:
887 866 edittext.append(text)
888 867 edittext.append("")
889 868 edittext.append(_("HG: Enter commit message."
890 869 " Lines beginning with 'HG:' are removed."))
891 870 edittext.append("HG: --")
892 871 edittext.append("HG: user: %s" % user)
893 872 if p2 != nullid:
894 873 edittext.append("HG: branch merge")
895 874 if branchname:
896 875 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
897 876 edittext.extend(["HG: changed %s" % f for f in changed])
898 877 edittext.extend(["HG: removed %s" % f for f in removed])
899 878 if not changed and not remove:
900 879 edittext.append("HG: no files changed")
901 880 edittext.append("")
902 881 # run editor in the repository root
903 882 olddir = os.getcwd()
904 883 os.chdir(self.root)
905 884 text = self.ui.edit("\n".join(edittext), user)
906 885 os.chdir(olddir)
907 886
908 887 lines = [line.rstrip() for line in text.rstrip().splitlines()]
909 888 while lines and not lines[0]:
910 889 del lines[0]
911 890 if not lines and use_dirstate:
912 891 raise util.Abort(_("empty commit message"))
913 892 text = '\n'.join(lines)
914 893
915 894 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
916 895 user, wctx.date(), extra)
917 896 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
918 897 parent2=xp2)
919 898 tr.close()
920 899
921 900 if self.branchcache:
922 901 self.branchtags()
923 902
924 903 if use_dirstate or update_dirstate:
925 904 self.dirstate.setparents(n)
926 905 if use_dirstate:
927 906 for f in removed:
928 907 self.dirstate.forget(f)
929 908 valid = 1 # our dirstate updates are complete
930 909
931 910 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
932 911 return n
933 912 finally:
934 913 if not valid: # don't save our updated dirstate
935 914 self.dirstate.invalidate()
936 915 del tr
937 916
938 917 def walk(self, match, node=None):
939 918 '''
940 919 walk recursively through the directory tree or a given
941 920 changeset, finding all files matched by the match
942 921 function
943 922 '''
944 923 return self[node].walk(match)
945 924
946 925 def status(self, node1='.', node2=None, match=None,
947 926 ignored=False, clean=False, unknown=False):
948 927 """return status of files between two nodes or node and working directory
949 928
950 929 If node1 is None, use the first dirstate parent instead.
951 930 If node2 is None, compare node1 with working directory.
952 931 """
953 932
954 933 def mfmatches(ctx):
955 934 mf = ctx.manifest().copy()
956 935 for fn in mf.keys():
957 936 if not match(fn):
958 937 del mf[fn]
959 938 return mf
960 939
961 940 ctx1 = self[node1]
962 941 ctx2 = self[node2]
963 942 working = ctx2 == self[None]
964 943 parentworking = working and ctx1 == self['.']
965 944 match = match or match_.always(self.root, self.getcwd())
966 945 listignored, listclean, listunknown = ignored, clean, unknown
967 946
968 947 if working: # we need to scan the working dir
969 948 s = self.dirstate.status(match, listignored, listclean, listunknown)
970 949 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
971 950
972 951 # check for any possibly clean files
973 952 if parentworking and cmp:
974 953 fixup = []
975 954 # do a full compare of any files that might have changed
976 955 for f in cmp:
977 956 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
978 957 or ctx1[f].cmp(ctx2[f].data())):
979 958 modified.append(f)
980 959 else:
981 960 fixup.append(f)
982 961
983 962 if listclean:
984 963 clean += fixup
985 964
986 965 # update dirstate for files that are actually clean
987 966 if fixup:
988 967 wlock = None
989 968 try:
990 969 try:
991 970 wlock = self.wlock(False)
992 971 for f in fixup:
993 972 self.dirstate.normal(f)
994 973 except lock.LockException:
995 974 pass
996 975 finally:
997 976 del wlock
998 977
999 978 if not parentworking:
1000 979 mf1 = mfmatches(ctx1)
1001 980 if working:
1002 981 # we are comparing working dir against non-parent
1003 982 # generate a pseudo-manifest for the working dir
1004 983 mf2 = mfmatches(self['.'])
1005 984 for f in cmp + modified + added:
1006 985 mf2[f] = None
1007 986 mf2.set(f, ctx2.flags(f))
1008 987 for f in removed:
1009 988 if f in mf2:
1010 989 del mf2[f]
1011 990 else:
1012 991 # we are comparing two revisions
1013 992 deleted, unknown, ignored = [], [], []
1014 993 mf2 = mfmatches(ctx2)
1015 994
1016 995 modified, added, clean = [], [], []
1017 996 for fn in mf2:
1018 997 if fn in mf1:
1019 998 if (mf1.flags(fn) != mf2.flags(fn) or
1020 999 (mf1[fn] != mf2[fn] and
1021 1000 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1022 1001 modified.append(fn)
1023 1002 elif listclean:
1024 1003 clean.append(fn)
1025 1004 del mf1[fn]
1026 1005 else:
1027 1006 added.append(fn)
1028 1007 removed = mf1.keys()
1029 1008
1030 1009 r = modified, added, removed, deleted, unknown, ignored, clean
1031 1010 [l.sort() for l in r]
1032 1011 return r
1033 1012
1034 1013 def add(self, list):
1035 1014 wlock = self.wlock()
1036 1015 try:
1037 1016 rejected = []
1038 1017 for f in list:
1039 1018 p = self.wjoin(f)
1040 1019 try:
1041 1020 st = os.lstat(p)
1042 1021 except:
1043 1022 self.ui.warn(_("%s does not exist!\n") % f)
1044 1023 rejected.append(f)
1045 1024 continue
1046 1025 if st.st_size > 10000000:
1047 1026 self.ui.warn(_("%s: files over 10MB may cause memory and"
1048 1027 " performance problems\n"
1049 1028 "(use 'hg revert %s' to unadd the file)\n")
1050 1029 % (f, f))
1051 1030 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1052 1031 self.ui.warn(_("%s not added: only files and symlinks "
1053 1032 "supported currently\n") % f)
1054 1033 rejected.append(p)
1055 1034 elif self.dirstate[f] in 'amn':
1056 1035 self.ui.warn(_("%s already tracked!\n") % f)
1057 1036 elif self.dirstate[f] == 'r':
1058 1037 self.dirstate.normallookup(f)
1059 1038 else:
1060 1039 self.dirstate.add(f)
1061 1040 return rejected
1062 1041 finally:
1063 1042 del wlock
1064 1043
1065 1044 def forget(self, list):
1066 1045 wlock = self.wlock()
1067 1046 try:
1068 1047 for f in list:
1069 1048 if self.dirstate[f] != 'a':
1070 1049 self.ui.warn(_("%s not added!\n") % f)
1071 1050 else:
1072 1051 self.dirstate.forget(f)
1073 1052 finally:
1074 1053 del wlock
1075 1054
1076 1055 def remove(self, list, unlink=False):
1077 1056 wlock = None
1078 1057 try:
1079 1058 if unlink:
1080 1059 for f in list:
1081 1060 try:
1082 1061 util.unlink(self.wjoin(f))
1083 1062 except OSError, inst:
1084 1063 if inst.errno != errno.ENOENT:
1085 1064 raise
1086 1065 wlock = self.wlock()
1087 1066 for f in list:
1088 1067 if unlink and os.path.exists(self.wjoin(f)):
1089 1068 self.ui.warn(_("%s still exists!\n") % f)
1090 1069 elif self.dirstate[f] == 'a':
1091 1070 self.dirstate.forget(f)
1092 1071 elif f not in self.dirstate:
1093 1072 self.ui.warn(_("%s not tracked!\n") % f)
1094 1073 else:
1095 1074 self.dirstate.remove(f)
1096 1075 finally:
1097 1076 del wlock
1098 1077
1099 1078 def undelete(self, list):
1100 1079 wlock = None
1101 1080 try:
1102 1081 manifests = [self.manifest.read(self.changelog.read(p)[0])
1103 1082 for p in self.dirstate.parents() if p != nullid]
1104 1083 wlock = self.wlock()
1105 1084 for f in list:
1106 1085 if self.dirstate[f] != 'r':
1107 1086 self.ui.warn("%s not removed!\n" % f)
1108 1087 else:
1109 1088 m = f in manifests[0] and manifests[0] or manifests[1]
1110 1089 t = self.file(f).read(m[f])
1111 1090 self.wwrite(f, t, m.flags(f))
1112 1091 self.dirstate.normal(f)
1113 1092 finally:
1114 1093 del wlock
1115 1094
1116 1095 def copy(self, source, dest):
1117 1096 wlock = None
1118 1097 try:
1119 1098 p = self.wjoin(dest)
1120 1099 if not (os.path.exists(p) or os.path.islink(p)):
1121 1100 self.ui.warn(_("%s does not exist!\n") % dest)
1122 1101 elif not (os.path.isfile(p) or os.path.islink(p)):
1123 1102 self.ui.warn(_("copy failed: %s is not a file or a "
1124 1103 "symbolic link\n") % dest)
1125 1104 else:
1126 1105 wlock = self.wlock()
1127 1106 if dest not in self.dirstate:
1128 1107 self.dirstate.add(dest)
1129 1108 self.dirstate.copy(source, dest)
1130 1109 finally:
1131 1110 del wlock
1132 1111
1133 1112 def heads(self, start=None):
1134 1113 heads = self.changelog.heads(start)
1135 1114 # sort the output in rev descending order
1136 1115 heads = [(-self.changelog.rev(h), h) for h in heads]
1137 1116 return [n for (r, n) in util.sort(heads)]
1138 1117
1139 1118 def branchheads(self, branch=None, start=None):
1140 1119 if branch is None:
1141 1120 branch = self[None].branch()
1142 1121 branches = self.branchtags()
1143 1122 if branch not in branches:
1144 1123 return []
1145 1124 # The basic algorithm is this:
1146 1125 #
1147 1126 # Start from the branch tip since there are no later revisions that can
1148 1127 # possibly be in this branch, and the tip is a guaranteed head.
1149 1128 #
1150 1129 # Remember the tip's parents as the first ancestors, since these by
1151 1130 # definition are not heads.
1152 1131 #
1153 1132 # Step backwards from the branch tip through all the revisions. We are
1154 1133 # guaranteed by the rules of Mercurial that we will now be visiting the
1155 1134 # nodes in reverse topological order (children before parents).
1156 1135 #
1157 1136 # If a revision is one of the ancestors of a head then we can toss it
1158 1137 # out of the ancestors set (we've already found it and won't be
1159 1138 # visiting it again) and put its parents in the ancestors set.
1160 1139 #
1161 1140 # Otherwise, if a revision is in the branch it's another head, since it
1162 1141 # wasn't in the ancestor list of an existing head. So add it to the
1163 1142 # head list, and add its parents to the ancestor list.
1164 1143 #
1165 1144 # If it is not in the branch ignore it.
1166 1145 #
1167 1146 # Once we have a list of heads, use nodesbetween to filter out all the
1168 1147 # heads that cannot be reached from startrev. There may be a more
1169 1148 # efficient way to do this as part of the previous algorithm.
1170 1149
1171 1150 set = util.set
1172 1151 heads = [self.changelog.rev(branches[branch])]
1173 1152 # Don't care if ancestors contains nullrev or not.
1174 1153 ancestors = set(self.changelog.parentrevs(heads[0]))
1175 1154 for rev in xrange(heads[0] - 1, nullrev, -1):
1176 1155 if rev in ancestors:
1177 1156 ancestors.update(self.changelog.parentrevs(rev))
1178 1157 ancestors.remove(rev)
1179 1158 elif self[rev].branch() == branch:
1180 1159 heads.append(rev)
1181 1160 ancestors.update(self.changelog.parentrevs(rev))
1182 1161 heads = [self.changelog.node(rev) for rev in heads]
1183 1162 if start is not None:
1184 1163 heads = self.changelog.nodesbetween([start], heads)[2]
1185 1164 return heads
1186 1165
1187 1166 def branches(self, nodes):
1188 1167 if not nodes:
1189 1168 nodes = [self.changelog.tip()]
1190 1169 b = []
1191 1170 for n in nodes:
1192 1171 t = n
1193 1172 while 1:
1194 1173 p = self.changelog.parents(n)
1195 1174 if p[1] != nullid or p[0] == nullid:
1196 1175 b.append((t, n, p[0], p[1]))
1197 1176 break
1198 1177 n = p[0]
1199 1178 return b
1200 1179
1201 1180 def between(self, pairs):
1202 1181 r = []
1203 1182
1204 1183 for top, bottom in pairs:
1205 1184 n, l, i = top, [], 0
1206 1185 f = 1
1207 1186
1208 1187 while n != bottom:
1209 1188 p = self.changelog.parents(n)[0]
1210 1189 if i == f:
1211 1190 l.append(n)
1212 1191 f = f * 2
1213 1192 n = p
1214 1193 i += 1
1215 1194
1216 1195 r.append(l)
1217 1196
1218 1197 return r
1219 1198
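# between() samples each top->bottom chain at exponentially spaced
# ancestors (1, 2, 4, 8, ... steps from top); findincoming() below uses
# these samples for its binary search over remote branches. A worked
# example on a hypothetical linear chain n0 <- n1 <- ... <- n9:
#
#   repo.between([(n9, n0)]) == [[n8, n7, n5, n1]]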
1220 1199 def findincoming(self, remote, base=None, heads=None, force=False):
1221 1200 """Return list of roots of the subsets of missing nodes from remote
1222 1201
1223 1202 If base dict is specified, assume that these nodes and their parents
1224 1203 exist on the remote side and that no child of a node of base exists
1225 1204 in both remote and self.
1226 1205 Furthermore base will be updated to include the nodes that exist
1227 1206 in self and remote but whose children do not exist in both.
1228 1207 If a list of heads is specified, return only nodes which are heads
1229 1208 or ancestors of these heads.
1230 1209
1231 1210 All the ancestors of base are in self and in remote.
1232 1211 All the descendants of the list returned are missing in self.
1233 1212 (and so we know that the rest of the nodes are missing in remote, see
1234 1213 outgoing)
1235 1214 """
1236 1215 m = self.changelog.nodemap
1237 1216 search = []
1238 1217 fetch = {}
1239 1218 seen = {}
1240 1219 seenbranch = {}
1241 1220 if base == None:
1242 1221 base = {}
1243 1222
1244 1223 if not heads:
1245 1224 heads = remote.heads()
1246 1225
1247 1226 if self.changelog.tip() == nullid:
1248 1227 base[nullid] = 1
1249 1228 if heads != [nullid]:
1250 1229 return [nullid]
1251 1230 return []
1252 1231
1253 1232 # assume we're closer to the tip than the root
1254 1233 # and start by examining the heads
1255 1234 self.ui.status(_("searching for changes\n"))
1256 1235
1257 1236 unknown = []
1258 1237 for h in heads:
1259 1238 if h not in m:
1260 1239 unknown.append(h)
1261 1240 else:
1262 1241 base[h] = 1
1263 1242
1264 1243 if not unknown:
1265 1244 return []
1266 1245
1267 1246 req = dict.fromkeys(unknown)
1268 1247 reqcnt = 0
1269 1248
1270 1249 # search through remote branches
1271 1250 # a 'branch' here is a linear segment of history, with four parts:
1272 1251 # head, root, first parent, second parent
1273 1252 # (a branch always has two parents (or none) by definition)
1274 1253 unknown = remote.branches(unknown)
1275 1254 while unknown:
1276 1255 r = []
1277 1256 while unknown:
1278 1257 n = unknown.pop(0)
1279 1258 if n[0] in seen:
1280 1259 continue
1281 1260
1282 1261 self.ui.debug(_("examining %s:%s\n")
1283 1262 % (short(n[0]), short(n[1])))
1284 1263 if n[0] == nullid: # found the end of the branch
1285 1264 pass
1286 1265 elif n in seenbranch:
1287 1266 self.ui.debug(_("branch already found\n"))
1288 1267 continue
1289 1268 elif n[1] and n[1] in m: # do we know the base?
1290 1269 self.ui.debug(_("found incomplete branch %s:%s\n")
1291 1270 % (short(n[0]), short(n[1])))
1292 1271 search.append(n) # schedule branch range for scanning
1293 1272 seenbranch[n] = 1
1294 1273 else:
1295 1274 if n[1] not in seen and n[1] not in fetch:
1296 1275 if n[2] in m and n[3] in m:
1297 1276 self.ui.debug(_("found new changeset %s\n") %
1298 1277 short(n[1]))
1299 1278 fetch[n[1]] = 1 # earliest unknown
1300 1279 for p in n[2:4]:
1301 1280 if p in m:
1302 1281 base[p] = 1 # latest known
1303 1282
1304 1283 for p in n[2:4]:
1305 1284 if p not in req and p not in m:
1306 1285 r.append(p)
1307 1286 req[p] = 1
1308 1287 seen[n[0]] = 1
1309 1288
1310 1289 if r:
1311 1290 reqcnt += 1
1312 1291 self.ui.debug(_("request %d: %s\n") %
1313 1292 (reqcnt, " ".join(map(short, r))))
1314 1293 for p in xrange(0, len(r), 10):
1315 1294 for b in remote.branches(r[p:p+10]):
1316 1295 self.ui.debug(_("received %s:%s\n") %
1317 1296 (short(b[0]), short(b[1])))
1318 1297 unknown.append(b)
1319 1298
1320 1299 # do binary search on the branches we found
1321 1300 while search:
1322 1301 n = search.pop(0)
1323 1302 reqcnt += 1
1324 1303 l = remote.between([(n[0], n[1])])[0]
1325 1304 l.append(n[1])
1326 1305 p = n[0]
1327 1306 f = 1
1328 1307 for i in l:
1329 1308 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1330 1309 if i in m:
1331 1310 if f <= 2:
1332 1311 self.ui.debug(_("found new branch changeset %s\n") %
1333 1312 short(p))
1334 1313 fetch[p] = 1
1335 1314 base[i] = 1
1336 1315 else:
1337 1316 self.ui.debug(_("narrowed branch search to %s:%s\n")
1338 1317 % (short(p), short(i)))
1339 1318 search.append((p, i))
1340 1319 break
1341 1320 p, f = i, f * 2
1342 1321
1343 1322 # sanity check our fetch list
1344 1323 for f in fetch.keys():
1345 1324 if f in m:
1346 1325 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1347 1326
1348 1327 if base.keys() == [nullid]:
1349 1328 if force:
1350 1329 self.ui.warn(_("warning: repository is unrelated\n"))
1351 1330 else:
1352 1331 raise util.Abort(_("repository is unrelated"))
1353 1332
1354 1333 self.ui.debug(_("found new changesets starting at ") +
1355 1334 " ".join([short(f) for f in fetch]) + "\n")
1356 1335
1357 1336 self.ui.debug(_("%d total queries\n") % reqcnt)
1358 1337
1359 1338 return fetch.keys()
1360 1339
1361 1340 def findoutgoing(self, remote, base=None, heads=None, force=False):
1362 1341 """Return list of nodes that are roots of subsets not in remote
1363 1342
1364 1343 If base dict is specified, assume that these nodes and their parents
1365 1344 exist on the remote side.
1366 1345 If a list of heads is specified, return only nodes which are heads
1367 1346 or ancestors of these heads, and return a second element which
1368 1347 contains all remote heads which get new children.
1369 1348 """
1370 1349 if base == None:
1371 1350 base = {}
1372 1351 self.findincoming(remote, base, heads, force=force)
1373 1352
1374 1353 self.ui.debug(_("common changesets up to ")
1375 1354 + " ".join(map(short, base.keys())) + "\n")
1376 1355
1377 1356 remain = dict.fromkeys(self.changelog.nodemap)
1378 1357
1379 1358 # prune everything remote has from the tree
1380 1359 del remain[nullid]
1381 1360 remove = base.keys()
1382 1361 while remove:
1383 1362 n = remove.pop(0)
1384 1363 if n in remain:
1385 1364 del remain[n]
1386 1365 for p in self.changelog.parents(n):
1387 1366 remove.append(p)
1388 1367
1389 1368 # find every node whose parents have been pruned
1390 1369 subset = []
1391 1370 # find every remote head that will get new children
1392 1371 updated_heads = {}
1393 1372 for n in remain:
1394 1373 p1, p2 = self.changelog.parents(n)
1395 1374 if p1 not in remain and p2 not in remain:
1396 1375 subset.append(n)
1397 1376 if heads:
1398 1377 if p1 in heads:
1399 1378 updated_heads[p1] = True
1400 1379 if p2 in heads:
1401 1380 updated_heads[p2] = True
1402 1381
1403 1382 # this is the set of all roots we have to push
1404 1383 if heads:
1405 1384 return subset, updated_heads.keys()
1406 1385 else:
1407 1386 return subset
1408 1387
1409 1388 def pull(self, remote, heads=None, force=False):
1410 1389 lock = self.lock()
1411 1390 try:
1412 1391 fetch = self.findincoming(remote, heads=heads, force=force)
1413 1392 if fetch == [nullid]:
1414 1393 self.ui.status(_("requesting all changes\n"))
1415 1394
1416 1395 if not fetch:
1417 1396 self.ui.status(_("no changes found\n"))
1418 1397 return 0
1419 1398
1420 1399 if heads is None:
1421 1400 cg = remote.changegroup(fetch, 'pull')
1422 1401 else:
1423 1402 if 'changegroupsubset' not in remote.capabilities:
1424 1403 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1425 1404 cg = remote.changegroupsubset(fetch, heads, 'pull')
1426 1405 return self.addchangegroup(cg, 'pull', remote.url())
1427 1406 finally:
1428 1407 del lock
1429 1408
1430 1409 def push(self, remote, force=False, revs=None):
1431 1410 # there are two ways to push to remote repo:
1432 1411 #
1433 1412 # addchangegroup assumes local user can lock remote
1434 1413 # repo (local filesystem, old ssh servers).
1435 1414 #
1436 1415 # unbundle assumes local user cannot lock remote repo (new ssh
1437 1416 # servers, http servers).
1438 1417
1439 1418 if remote.capable('unbundle'):
1440 1419 return self.push_unbundle(remote, force, revs)
1441 1420 return self.push_addchangegroup(remote, force, revs)
1442 1421
1443 1422 def prepush(self, remote, force, revs):
1444 1423 base = {}
1445 1424 remote_heads = remote.heads()
1446 1425 inc = self.findincoming(remote, base, remote_heads, force=force)
1447 1426
1448 1427 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1449 1428 if revs is not None:
1450 1429 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1451 1430 else:
1452 1431 bases, heads = update, self.changelog.heads()
1453 1432
1454 1433 if not bases:
1455 1434 self.ui.status(_("no changes found\n"))
1456 1435 return None, 1
1457 1436 elif not force:
1458 1437 # check if we're creating new remote heads
1459 1438 # to be a remote head after push, node must be either
1460 1439 # - unknown locally
1461 1440 # - a local outgoing head descended from update
1462 1441 # - a remote head that's known locally and not
1463 1442 # ancestral to an outgoing head
1464 1443
1465 1444 warn = 0
1466 1445
1467 1446 if remote_heads == [nullid]:
1468 1447 warn = 0
1469 1448 elif not revs and len(heads) > len(remote_heads):
1470 1449 warn = 1
1471 1450 else:
1472 1451 newheads = list(heads)
1473 1452 for r in remote_heads:
1474 1453 if r in self.changelog.nodemap:
1475 1454 desc = self.changelog.heads(r, heads)
1476 1455 l = [h for h in heads if h in desc]
1477 1456 if not l:
1478 1457 newheads.append(r)
1479 1458 else:
1480 1459 newheads.append(r)
1481 1460 if len(newheads) > len(remote_heads):
1482 1461 warn = 1
1483 1462
1484 1463 if warn:
1485 1464 self.ui.warn(_("abort: push creates new remote heads!\n"))
1486 1465 self.ui.status(_("(did you forget to merge?"
1487 1466 " use push -f to force)\n"))
1488 1467 return None, 0
1489 1468 elif inc:
1490 1469 self.ui.warn(_("note: unsynced remote changes!\n"))
1491 1470
1492 1471
1493 1472 if revs is None:
1494 1473 cg = self.changegroup(update, 'push')
1495 1474 else:
1496 1475 cg = self.changegroupsubset(update, revs, 'push')
1497 1476 return cg, remote_heads
1498 1477
1499 1478 def push_addchangegroup(self, remote, force, revs):
1500 1479 lock = remote.lock()
1501 1480 try:
1502 1481 ret = self.prepush(remote, force, revs)
1503 1482 if ret[0] is not None:
1504 1483 cg, remote_heads = ret
1505 1484 return remote.addchangegroup(cg, 'push', self.url())
1506 1485 return ret[1]
1507 1486 finally:
1508 1487 del lock
1509 1488
1510 1489 def push_unbundle(self, remote, force, revs):
1511 1490 # local repo finds heads on server, finds out what revs it
1512 1491 # must push. once revs transferred, if server finds it has
1513 1492 # different heads (someone else won commit/push race), server
1514 1493 # aborts.
1515 1494
1516 1495 ret = self.prepush(remote, force, revs)
1517 1496 if ret[0] is not None:
1518 1497 cg, remote_heads = ret
1519 1498 if force: remote_heads = ['force']
1520 1499 return remote.unbundle(cg, remote_heads, 'push')
1521 1500 return ret[1]
1522 1501
1523 1502 def changegroupinfo(self, nodes, source):
1524 1503 if self.ui.verbose or source == 'bundle':
1525 1504 self.ui.status(_("%d changesets found\n") % len(nodes))
1526 1505 if self.ui.debugflag:
1527 1506 self.ui.debug(_("List of changesets:\n"))
1528 1507 for node in nodes:
1529 1508 self.ui.debug("%s\n" % hex(node))
1530 1509
1531 1510 def changegroupsubset(self, bases, heads, source, extranodes=None):
1532 1511 """This function generates a changegroup consisting of all the nodes
1533 1512 that are descendants of any of the bases, and ancestors of any of
1534 1513 the heads.
1535 1514
1536 1515 It is fairly complex as determining which filenodes and which
1537 1516 manifest nodes need to be included for the changeset to be complete
1538 1517 is non-trivial.
1539 1518
1540 1519 Another wrinkle is doing the reverse, figuring out which changeset in
1541 1520 the changegroup a particular filenode or manifestnode belongs to.
1542 1521
1543 1522 The caller can specify some nodes that must be included in the
1544 1523 changegroup using the extranodes argument. It should be a dict
1545 1524 where the keys are the filenames (or 1 for the manifest), and the
1546 1525 values are lists of (node, linknode) tuples, where node is a wanted
1547 1526 node and linknode is the changelog node that should be transmitted as
1548 1527 the linkrev.
1549 1528 """
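        # A hedged illustration of the extranodes shape described above
        # (the filename and node variables are made up):
        #
        #   extranodes = {
        #       'foo/bar.txt': [(filenode, linknode)],
        #       1:             [(manifestnode, linknode)],  # 1 keys the manifest
        #   }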
1550 1529
1551 1530 self.hook('preoutgoing', throw=True, source=source)
1552 1531
1553 1532 # Set up some initial variables
1554 1533 # Make it easy to refer to self.changelog
1555 1534 cl = self.changelog
1556 1535 # msng is short for missing - compute the list of changesets in this
1557 1536 # changegroup.
1558 1537 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1559 1538 self.changegroupinfo(msng_cl_lst, source)
1560 1539 # Some bases may turn out to be superfluous, and some heads may be
1561 1540 # too. nodesbetween will return the minimal set of bases and heads
1562 1541 # necessary to re-create the changegroup.
1563 1542
1564 1543 # Known heads are the list of heads that it is assumed the recipient
1565 1544 # of this changegroup will know about.
1566 1545 knownheads = {}
1567 1546 # We assume that all parents of bases are known heads.
1568 1547 for n in bases:
1569 1548 for p in cl.parents(n):
1570 1549 if p != nullid:
1571 1550 knownheads[p] = 1
1572 1551 knownheads = knownheads.keys()
1573 1552 if knownheads:
1574 1553 # Now that we know what heads are known, we can compute which
1575 1554 # changesets are known. The recipient must know about all
1576 1555 # changesets required to reach the known heads from the null
1577 1556 # changeset.
1578 1557 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1579 1558 junk = None
1580 1559 # Transform the list into an ersatz set.
1581 1560 has_cl_set = dict.fromkeys(has_cl_set)
1582 1561 else:
1583 1562 # If there were no known heads, the recipient cannot be assumed to
1584 1563 # know about any changesets.
1585 1564 has_cl_set = {}
1586 1565
1587 1566 # Make it easy to refer to self.manifest
1588 1567 mnfst = self.manifest
1589 1568 # We don't know which manifests are missing yet
1590 1569 msng_mnfst_set = {}
1591 1570 # Nor do we know which filenodes are missing.
1592 1571 msng_filenode_set = {}
1593 1572
1594 1573 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1595 1574 junk = None
1596 1575
1597 1576 # A changeset always belongs to itself, so the changenode lookup
1598 1577 # function for a changenode is identity.
1599 1578 def identity(x):
1600 1579 return x
1601 1580
1602 1581 # A function generating function. Sets up an environment for the
1603 1582 # inner function.
1604 1583 def cmp_by_rev_func(revlog):
1605 1584 # Compare two nodes by their revision number in the environment's
1606 1585 # revision history. Since the revision number both represents the
1607 1586 # most efficient order to read the nodes in, and represents a
1608 1587 # topological sorting of the nodes, this function is often useful.
1609 1588 def cmp_by_rev(a, b):
1610 1589 return cmp(revlog.rev(a), revlog.rev(b))
1611 1590 return cmp_by_rev
1612 1591
1613 1592 # If we determine that a particular file or manifest node must be a
1614 1593 # node that the recipient of the changegroup will already have, we can
1615 1594 # also assume the recipient will have all the parents. This function
1616 1595 # prunes them from the set of missing nodes.
1617 1596 def prune_parents(revlog, hasset, msngset):
1618 1597 haslst = hasset.keys()
1619 1598 haslst.sort(cmp_by_rev_func(revlog))
1620 1599 for node in haslst:
1621 1600 parentlst = [p for p in revlog.parents(node) if p != nullid]
1622 1601 while parentlst:
1623 1602 n = parentlst.pop()
1624 1603 if n not in hasset:
1625 1604 hasset[n] = 1
1626 1605 p = [p for p in revlog.parents(n) if p != nullid]
1627 1606 parentlst.extend(p)
1628 1607 for n in hasset:
1629 1608 msngset.pop(n, None)
1630 1609
1631 1610 # This is a function generating function used to set up an environment
1632 1611 # for the inner function to execute in.
1633 1612 def manifest_and_file_collector(changedfileset):
1634 1613 # This is an information gathering function that gathers
1635 1614 # information from each changeset node that goes out as part of
1636 1615 # the changegroup. The information gathered is a list of which
1637 1616 # manifest nodes are potentially required (the recipient may
1638 1617 # already have them) and a total list of all files which were
1639 1618 # changed in any changeset in the changegroup.
1640 1619 #
1641 1620 # We also remember, for each manifest, the first changenode we
1642 1621 # saw that referenced it, so we can later determine which
1643 1622 # changenode 'owns' the manifest.
1644 1623 def collect_manifests_and_files(clnode):
1645 1624 c = cl.read(clnode)
1646 1625 for f in c[3]:
1647 1626 # This is to make sure we only have one instance of each
1648 1627 # filename string for each filename.
1649 1628 changedfileset.setdefault(f, f)
1650 1629 msng_mnfst_set.setdefault(c[0], clnode)
1651 1630 return collect_manifests_and_files
1652 1631
1653 1632 # Figure out which manifest nodes (of the ones we think might be part
1654 1633 # of the changegroup) the recipient must know about and remove them
1655 1634 # from the changegroup.
1656 1635 def prune_manifests():
1657 1636 has_mnfst_set = {}
1658 1637 for n in msng_mnfst_set:
1659 1638 # If a 'missing' manifest thinks it belongs to a changenode
1660 1639 # the recipient is assumed to have, obviously the recipient
1661 1640 # must have that manifest.
1662 1641 linknode = cl.node(mnfst.linkrev(n))
1663 1642 if linknode in has_cl_set:
1664 1643 has_mnfst_set[n] = 1
1665 1644 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1666 1645
1667 1646 # Use the information collected in collect_manifests_and_files to say
1668 1647 # which changenode any manifestnode belongs to.
1669 1648 def lookup_manifest_link(mnfstnode):
1670 1649 return msng_mnfst_set[mnfstnode]
1671 1650
1672 1651 # A function generating function that sets up the initial environment
1673 1652 # for the inner function.
1674 1653 def filenode_collector(changedfiles):
1675 1654 next_rev = [0]
1676 1655 # This gathers information from each manifestnode included in the
1677 1656 # changegroup about which filenodes the manifest node references
1678 1657 # so we can include those in the changegroup too.
1679 1658 #
1680 1659 # It also remembers which changenode each filenode belongs to. It
1681 1660 # does this by assuming that a filenode belongs to the changenode
1682 1661 # that the first manifest referencing it belongs to.
1683 1662 def collect_msng_filenodes(mnfstnode):
1684 1663 r = mnfst.rev(mnfstnode)
1685 1664 if r == next_rev[0]:
1686 1665 # If the last rev we looked at was the one just previous,
1687 1666 # we only need to see a diff.
1688 1667 deltamf = mnfst.readdelta(mnfstnode)
1689 1668 # For each line in the delta
1690 1669 for f, fnode in deltamf.items():
1691 1670 f = changedfiles.get(f, None)
1692 1671 # And if the file is in the list of files we care
1693 1672 # about.
1694 1673 if f is not None:
1695 1674 # Get the changenode this manifest belongs to
1696 1675 clnode = msng_mnfst_set[mnfstnode]
1697 1676 # Create the set of filenodes for the file if
1698 1677 # there isn't one already.
1699 1678 ndset = msng_filenode_set.setdefault(f, {})
1700 1679 # And set the filenode's changelog node to the
1701 1680 # manifest's if it hasn't been set already.
1702 1681 ndset.setdefault(fnode, clnode)
1703 1682 else:
1704 1683 # Otherwise we need a full manifest.
1705 1684 m = mnfst.read(mnfstnode)
1706 1685 # For every file we care about.
1707 1686 for f in changedfiles:
1708 1687 fnode = m.get(f, None)
1709 1688 # If it's in the manifest
1710 1689 if fnode is not None:
1711 1690 # See comments above.
1712 1691 clnode = msng_mnfst_set[mnfstnode]
1713 1692 ndset = msng_filenode_set.setdefault(f, {})
1714 1693 ndset.setdefault(fnode, clnode)
1715 1694 # Remember the revision we hope to see next.
1716 1695 next_rev[0] = r + 1
1717 1696 return collect_msng_filenodes
1718 1697
1719 1698 # We have a list of filenodes we think we need for a file; let's
1720 1699 # remove all those we know the recipient must have.
1721 1700 def prune_filenodes(f, filerevlog):
1722 1701 msngset = msng_filenode_set[f]
1723 1702 hasset = {}
1724 1703 # If a 'missing' filenode thinks it belongs to a changenode we
1725 1704 # assume the recipient must have, then the recipient must have
1726 1705 # that filenode.
1727 1706 for n in msngset:
1728 1707 clnode = cl.node(filerevlog.linkrev(n))
1729 1708 if clnode in has_cl_set:
1730 1709 hasset[n] = 1
1731 1710 prune_parents(filerevlog, hasset, msngset)
1732 1711
1733 1712 # A function generator function that sets up a context for the
1734 1713 # inner function.
1735 1714 def lookup_filenode_link_func(fname):
1736 1715 msngset = msng_filenode_set[fname]
1737 1716 # Lookup the changenode the filenode belongs to.
1738 1717 def lookup_filenode_link(fnode):
1739 1718 return msngset[fnode]
1740 1719 return lookup_filenode_link
1741 1720
1742 1721 # Add the nodes that were explicitly requested.
1743 1722 def add_extra_nodes(name, nodes):
1744 1723 if not extranodes or name not in extranodes:
1745 1724 return
1746 1725
1747 1726 for node, linknode in extranodes[name]:
1748 1727 if node not in nodes:
1749 1728 nodes[node] = linknode
1750 1729
1751 1730 # Now that we have all these utility functions to help out and
1752 1731 # logically divide up the task, generate the group.
1753 1732 def gengroup():
1754 1733 # The set of changed files starts empty.
1755 1734 changedfiles = {}
1756 1735 # Create a changenode group generator that will call our functions
1757 1736 # back to lookup the owning changenode and collect information.
1758 1737 group = cl.group(msng_cl_lst, identity,
1759 1738 manifest_and_file_collector(changedfiles))
1760 1739 for chnk in group:
1761 1740 yield chnk
1762 1741
1763 1742 # The list of manifests has been collected by the generator
1764 1743 # calling our functions back.
1765 1744 prune_manifests()
1766 1745 add_extra_nodes(1, msng_mnfst_set)
1767 1746 msng_mnfst_lst = msng_mnfst_set.keys()
1768 1747 # Sort the manifestnodes by revision number.
1769 1748 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1770 1749 # Create a generator for the manifestnodes that calls our lookup
1771 1750 # and data collection functions back.
1772 1751 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1773 1752 filenode_collector(changedfiles))
1774 1753 for chnk in group:
1775 1754 yield chnk
1776 1755
1777 1756 # These are no longer needed, dereference and toss the memory for
1778 1757 # them.
1779 1758 msng_mnfst_lst = None
1780 1759 msng_mnfst_set.clear()
1781 1760
1782 1761 if extranodes:
1783 1762 for fname in extranodes:
1784 1763 if isinstance(fname, int):
1785 1764 continue
1786 1765 add_extra_nodes(fname,
1787 1766 msng_filenode_set.setdefault(fname, {}))
1788 1767 changedfiles[fname] = 1
1789 1768 # Go through all our files in order sorted by name.
1790 1769 for fname in util.sort(changedfiles):
1791 1770 filerevlog = self.file(fname)
1792 1771 if not len(filerevlog):
1793 1772 raise util.Abort(_("empty or missing revlog for %s") % fname)
1794 1773 # Toss out the filenodes that the recipient isn't really
1795 1774 # missing.
1796 1775 if fname in msng_filenode_set:
1797 1776 prune_filenodes(fname, filerevlog)
1798 1777 msng_filenode_lst = msng_filenode_set[fname].keys()
1799 1778 else:
1800 1779 msng_filenode_lst = []
1801 1780 # If any filenodes are left, generate the group for them,
1802 1781 # otherwise don't bother.
1803 1782 if len(msng_filenode_lst) > 0:
1804 1783 yield changegroup.chunkheader(len(fname))
1805 1784 yield fname
1806 1785 # Sort the filenodes by their revision number.
1807 1786 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1808 1787 # Create a group generator and only pass in a changenode
1809 1788 # lookup function as we need to collect no information
1810 1789 # from filenodes.
1811 1790 group = filerevlog.group(msng_filenode_lst,
1812 1791 lookup_filenode_link_func(fname))
1813 1792 for chnk in group:
1814 1793 yield chnk
1815 1794 if fname in msng_filenode_set:
1816 1795 # Don't need this anymore, toss it to free memory.
1817 1796 del msng_filenode_set[fname]
1818 1797 # Signal that no more groups are left.
1819 1798 yield changegroup.closechunk()
1820 1799
1821 1800 if msng_cl_lst:
1822 1801 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1823 1802
1824 1803 return util.chunkbuffer(gengroup())
1825 1804
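A hedged usage sketch (caller names and arguments assumed, not part of this changeset): the value returned by changegroupsubset is a util.chunkbuffer, so a consumer can drain the whole changegroup with a plain read() loop.

    # hypothetical consumer of the generator wrapped above
    cg = repo.changegroupsubset(bases, heads, 'bundle')
    while True:
        chunk = cg.read(4096)      # chunkbuffer exposes a file-like read()
        if not chunk:              # empty string means the stream is done
            break
        outfile.write(chunk)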
1826 1805 def changegroup(self, basenodes, source):
1827 1806 """Generate a changegroup of all nodes that we have that a recipient
1828 1807 doesn't.
1829 1808
1830 1809 This is much easier than the previous function as we can assume that
1831 1810 the recipient has any changenode we aren't sending them."""
1832 1811
1833 1812 self.hook('preoutgoing', throw=True, source=source)
1834 1813
1835 1814 cl = self.changelog
1836 1815 nodes = cl.nodesbetween(basenodes, None)[0]
1837 1816 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1838 1817 self.changegroupinfo(nodes, source)
1839 1818
1840 1819 def identity(x):
1841 1820 return x
1842 1821
1843 1822 def gennodelst(log):
1844 1823 for r in log:
1845 1824 n = log.node(r)
1846 1825 if log.linkrev(n) in revset:
1847 1826 yield n
1848 1827
1849 1828 def changed_file_collector(changedfileset):
1850 1829 def collect_changed_files(clnode):
1851 1830 c = cl.read(clnode)
1852 1831 for fname in c[3]:
1853 1832 changedfileset[fname] = 1
1854 1833 return collect_changed_files
1855 1834
1856 1835 def lookuprevlink_func(revlog):
1857 1836 def lookuprevlink(n):
1858 1837 return cl.node(revlog.linkrev(n))
1859 1838 return lookuprevlink
1860 1839
1861 1840 def gengroup():
1862 1841 # construct a list of all changed files
1863 1842 changedfiles = {}
1864 1843
1865 1844 for chnk in cl.group(nodes, identity,
1866 1845 changed_file_collector(changedfiles)):
1867 1846 yield chnk
1868 1847
1869 1848 mnfst = self.manifest
1870 1849 nodeiter = gennodelst(mnfst)
1871 1850 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1872 1851 yield chnk
1873 1852
1874 1853 for fname in util.sort(changedfiles):
1875 1854 filerevlog = self.file(fname)
1876 1855 if not len(filerevlog):
1877 1856 raise util.Abort(_("empty or missing revlog for %s") % fname)
1878 1857 nodeiter = gennodelst(filerevlog)
1879 1858 nodeiter = list(nodeiter)
1880 1859 if nodeiter:
1881 1860 yield changegroup.chunkheader(len(fname))
1882 1861 yield fname
1883 1862 lookup = lookuprevlink_func(filerevlog)
1884 1863 for chnk in filerevlog.group(nodeiter, lookup):
1885 1864 yield chnk
1886 1865
1887 1866 yield changegroup.closechunk()
1888 1867
1889 1868 if nodes:
1890 1869 self.hook('outgoing', node=hex(nodes[0]), source=source)
1891 1870
1892 1871 return util.chunkbuffer(gengroup())
1893 1872
1894 1873 def addchangegroup(self, source, srctype, url, emptyok=False):
1895 1874 """add changegroup to repo.
1896 1875
1897 1876 return values:
1898 1877 - nothing changed or no source: 0
1899 1878 - more heads than before: 1+added heads (2..n)
1900 1879 - fewer heads than before: -1-removed heads (-2..-n)
1901 1880 - number of heads stays the same: 1
1902 1881 """
1903 1882 def csmap(x):
1904 1883 self.ui.debug(_("add changeset %s\n") % short(x))
1905 1884 return len(cl)
1906 1885
1907 1886 def revmap(x):
1908 1887 return cl.rev(x)
1909 1888
1910 1889 if not source:
1911 1890 return 0
1912 1891
1913 1892 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1914 1893
1915 1894 changesets = files = revisions = 0
1916 1895
1917 1896 # write changelog data to temp files so concurrent readers will not see
1918 1897 # an inconsistent view
1919 1898 cl = self.changelog
1920 1899 cl.delayupdate()
1921 1900 oldheads = len(cl.heads())
1922 1901
1923 1902 tr = self.transaction()
1924 1903 try:
1925 1904 trp = weakref.proxy(tr)
1926 1905 # pull off the changeset group
1927 1906 self.ui.status(_("adding changesets\n"))
1928 1907 cor = len(cl) - 1
1929 1908 chunkiter = changegroup.chunkiter(source)
1930 1909 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1931 1910 raise util.Abort(_("received changelog group is empty"))
1932 1911 cnr = len(cl) - 1
1933 1912 changesets = cnr - cor
1934 1913
1935 1914 # pull off the manifest group
1936 1915 self.ui.status(_("adding manifests\n"))
1937 1916 chunkiter = changegroup.chunkiter(source)
1938 1917 # no need to check for empty manifest group here:
1939 1918 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1940 1919 # no new manifest will be created and the manifest group will
1941 1920 # be empty during the pull
1942 1921 self.manifest.addgroup(chunkiter, revmap, trp)
1943 1922
1944 1923 # process the files
1945 1924 self.ui.status(_("adding file changes\n"))
1946 1925 while 1:
1947 1926 f = changegroup.getchunk(source)
1948 1927 if not f:
1949 1928 break
1950 1929 self.ui.debug(_("adding %s revisions\n") % f)
1951 1930 fl = self.file(f)
1952 1931 o = len(fl)
1953 1932 chunkiter = changegroup.chunkiter(source)
1954 1933 if fl.addgroup(chunkiter, revmap, trp) is None:
1955 1934 raise util.Abort(_("received file revlog group is empty"))
1956 1935 revisions += len(fl) - o
1957 1936 files += 1
1958 1937
1959 1938 # make changelog see real files again
1960 1939 cl.finalize(trp)
1961 1940
1962 1941 newheads = len(self.changelog.heads())
1963 1942 heads = ""
1964 1943 if oldheads and newheads != oldheads:
1965 1944 heads = _(" (%+d heads)") % (newheads - oldheads)
1966 1945
1967 1946 self.ui.status(_("added %d changesets"
1968 1947 " with %d changes to %d files%s\n")
1969 1948 % (changesets, revisions, files, heads))
1970 1949
1971 1950 if changesets > 0:
1972 1951 self.hook('pretxnchangegroup', throw=True,
1973 1952 node=hex(self.changelog.node(cor+1)), source=srctype,
1974 1953 url=url)
1975 1954
1976 1955 tr.close()
1977 1956 finally:
1978 1957 del tr
1979 1958
1980 1959 if changesets > 0:
1981 1960 # forcefully update the on-disk branch cache
1982 1961 self.ui.debug(_("updating the branch cache\n"))
1983 1962 self.branchtags()
1984 1963 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1985 1964 source=srctype, url=url)
1986 1965
1987 1966 for i in xrange(cor + 1, cnr + 1):
1988 1967 self.hook("incoming", node=hex(self.changelog.node(i)),
1989 1968 source=srctype, url=url)
1990 1969
1991 1970 # never return 0 here:
1992 1971 if newheads < oldheads:
1993 1972 return newheads - oldheads - 1
1994 1973 else:
1995 1974 return newheads - oldheads + 1
1996 1975
1997 1976
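A minimal sketch of decoding the return-value convention documented in the docstring above (variable names hypothetical); the convention deliberately never yields 0 once something changed, so truth-testing the result still works.

    ret = repo.addchangegroup(source, 'pull', url)
    if ret == 0:
        print 'no changes'                      # nothing pulled
    elif ret > 1:
        print '%d new heads' % (ret - 1)        # heads increased
    elif ret < 0:
        print '%d heads removed' % (-ret - 1)   # heads decreased
    else:                                       # ret == 1
        print 'head count unchanged'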
1998 1977 def stream_in(self, remote):
1999 1978 fp = remote.stream_out()
2000 1979 l = fp.readline()
2001 1980 try:
2002 1981 resp = int(l)
2003 1982 except ValueError:
2004 1983 raise util.UnexpectedOutput(
2005 1984 _('Unexpected response from remote server:'), l)
2006 1985 if resp == 1:
2007 1986 raise util.Abort(_('operation forbidden by server'))
2008 1987 elif resp == 2:
2009 1988 raise util.Abort(_('locking the remote repository failed'))
2010 1989 elif resp != 0:
2011 1990 raise util.Abort(_('the server sent an unknown error code'))
2012 1991 self.ui.status(_('streaming all changes\n'))
2013 1992 l = fp.readline()
2014 1993 try:
2015 1994 total_files, total_bytes = map(int, l.split(' ', 1))
2016 1995 except (ValueError, TypeError):
2017 1996 raise util.UnexpectedOutput(
2018 1997 _('Unexpected response from remote server:'), l)
2019 1998 self.ui.status(_('%d files to transfer, %s of data\n') %
2020 1999 (total_files, util.bytecount(total_bytes)))
2021 2000 start = time.time()
2022 2001 for i in xrange(total_files):
2023 2002 # XXX doesn't support '\n' or '\r' in filenames
2024 2003 l = fp.readline()
2025 2004 try:
2026 2005 name, size = l.split('\0', 1)
2027 2006 size = int(size)
2028 2007 except (ValueError, TypeError):
2029 2008 raise util.UnexpectedOutput(
2030 2009 _('Unexpected response from remote server:'), l)
2031 2010 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2032 2011 ofp = self.sopener(name, 'w')
2033 2012 for chunk in util.filechunkiter(fp, limit=size):
2034 2013 ofp.write(chunk)
2035 2014 ofp.close()
2036 2015 elapsed = time.time() - start
2037 2016 if elapsed <= 0:
2038 2017 elapsed = 0.001
2039 2018 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2040 2019 (util.bytecount(total_bytes), elapsed,
2041 2020 util.bytecount(total_bytes / elapsed)))
2042 2021 self.invalidate()
2043 2022 return len(self.heads()) + 1
2044 2023
2045 2024 def clone(self, remote, heads=[], stream=False):
2046 2025 '''clone remote repository.
2047 2026
2048 2027 keyword arguments:
2049 2028 heads: list of revs to clone (forces use of pull)
2050 2029 stream: use streaming clone if possible'''
2051 2030
2052 2031 # now, all clients that can request uncompressed clones can
2053 2032 # read repo formats supported by all servers that can serve
2054 2033 # them.
2055 2034
2056 2035 # if revlog format changes, client will have to check version
2057 2036 # and format flags on "stream" capability, and use
2058 2037 # uncompressed only if compatible.
2059 2038
2060 2039 if stream and not heads and remote.capable('stream'):
2061 2040 return self.stream_in(remote)
2062 2041 return self.pull(remote, heads)
2063 2042
2043 def storefiles(self):
2044 '''get all *.i and *.d files in the store
2045
2046 Returns (list of (filename, size), total_bytes)'''
2047
2048 lock = None
2049 try:
2050 self.ui.debug('scanning\n')
2051 entries = []
2052 total_bytes = 0
2053 # get consistent snapshot of repo, lock during scan
2054 lock = self.lock()
2055 for name, size in self.store.walk():
2056 entries.append((name, size))
2057 total_bytes += size
2058 return entries, total_bytes
2059 finally:
2060 del lock
2061
2064 2062 # used to avoid circular references so destructors work
2065 2063 def aftertrans(files):
2066 2064 renamefiles = [tuple(t) for t in files]
2067 2065 def a():
2068 2066 for src, dest in renamefiles:
2069 2067 util.rename(src, dest)
2070 2068 return a
2071 2069
2072 2070 def instance(ui, path, create):
2073 2071 return localrepository(ui, util.drop_scheme('file', path), create)
2074 2072
2075 2073 def islocal(path):
2076 2074 return True
@@ -1,83 +1,82
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from i18n import _
11 11 import changelog, httprangereader
12 12 import repo, localrepo, manifest, util, store
13 13 import urllib, urllib2, errno
14 14
15 15 class rangereader(httprangereader.httprangereader):
16 16 def read(self, size=None):
17 17 try:
18 18 return httprangereader.httprangereader.read(self, size)
19 19 except urllib2.HTTPError, inst:
20 20 num = inst.code == 404 and errno.ENOENT or None
21 21 raise IOError(num, inst)
22 22 except urllib2.URLError, inst:
23 23 raise IOError(None, inst.reason[1])
24 24
25 25 def opener(base):
26 26 """return a function that opens files over http"""
27 27 p = base
28 28 def o(path, mode="r"):
29 29 f = "/".join((p, urllib.quote(path)))
30 30 return rangereader(f)
31 31 return o
32 32
33 33 class statichttprepository(localrepo.localrepository):
34 34 def __init__(self, ui, path):
35 35 self._url = path
36 36 self.ui = ui
37 37
38 38 self.path = path.rstrip('/') + "/.hg"
39 39 self.opener = opener(self.path)
40 40
41 41 # find requirements
42 42 try:
43 43 requirements = self.opener("requires").read().splitlines()
44 44 except IOError, inst:
45 45 if inst.errno == errno.ENOENT:
46 46 msg = _("'%s' does not appear to be an hg repository") % path
47 47 raise repo.RepoError(msg)
48 48 else:
49 49 requirements = []
50 50
51 51 # check them
52 52 for r in requirements:
53 53 if r not in self.supported:
54 54 raise repo.RepoError(_("requirement '%s' not supported") % r)
55 55
56 56 # setup store
57 57 if "store" in requirements:
58 self.encodefn = store.encodefilename
59 self.decodefn = store.decodefilename
60 58 self.spath = self.path + "/store"
61 59 else:
62 self.encodefn = lambda x: x
63 self.decodefn = lambda x: x
64 60 self.spath = self.path
65 self.sopener = store.encodedopener(opener(self.spath), self.encodefn)
61 self.encodefn = store.encodefn(requirements)
62 so = opener(self.spath)
63 self.sopener = lambda path, *args, **kw: so(
64 self.encodefn(path), *args, **kw)
66 65
67 66 self.manifest = manifest.manifest(self.sopener)
68 67 self.changelog = changelog.changelog(self.sopener)
69 68 self.tagscache = None
70 69 self.nodetagscache = None
71 70 self.encodepats = None
72 71 self.decodepats = None
73 72
74 73 def url(self):
75 74 return 'static-' + self._url
76 75
77 76 def local(self):
78 77 return False
79 78
80 79 def instance(ui, path, create):
81 80 if create:
82 81 raise util.Abort(_('cannot create new static-http repository'))
83 82 return statichttprepository(ui, path[7:])
@@ -1,39 +1,125
1 1 # store.py - repository store handling for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 import os, stat, osutil, util
9
8 10 def _buildencodefun():
9 11 e = '_'
10 12 win_reserved = [ord(x) for x in '\\:*?"<>|']
11 13 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
12 14 for x in (range(32) + range(126, 256) + win_reserved):
13 15 cmap[chr(x)] = "~%02x" % x
14 16 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
15 17 cmap[chr(x)] = e + chr(x).lower()
16 18 dmap = {}
17 19 for k, v in cmap.iteritems():
18 20 dmap[v] = k
19 21 def decode(s):
20 22 i = 0
21 23 while i < len(s):
22 24 for l in xrange(1, 4):
23 25 try:
24 26 yield dmap[s[i:i+l]]
25 27 i += l
26 28 break
27 29 except KeyError:
28 30 pass
29 31 else:
30 32 raise KeyError
31 33 return (lambda s: "".join([cmap[c] for c in s]),
32 34 lambda s: "".join(list(decode(s))))
33 35
34 36 encodefilename, decodefilename = _buildencodefun()
35 37
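To make the escaping concrete, a round-trip sketch (the path is invented for illustration): uppercase letters and '_' get an '_' escape, while control, non-ASCII and Windows-reserved bytes become '~%02x'.

    >>> encodefilename('data/Foo_Bar.TXT.i')
    'data/_foo___bar._t_x_t.i'
    >>> decodefilename('data/_foo___bar._t_x_t.i')
    'data/Foo_Bar.TXT.i'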
36 def encodedopener(openerfn, fn):
37 def o(path, *args, **kw):
38 return openerfn(fn(path), *args, **kw)
39 return o
38 def _dirwalk(path, recurse):
39 '''yields (filename, size)'''
40 for e, kind, st in osutil.listdir(path, stat=True):
41 pe = os.path.join(path, e)
42 if kind == stat.S_IFDIR:
43 if recurse:
44 for x in _dirwalk(pe, True):
45 yield x
46 elif kind == stat.S_IFREG:
47 yield pe, st.st_size
48
49 class _store:
50 '''base class for local repository stores'''
51 def __init__(self, path):
52 self.path = path
53 try:
54 # files in .hg/ will be created using this mode
55 mode = os.stat(self.path).st_mode
56 # avoid some useless chmods
57 if (0777 & ~util._umask) == (0777 & mode):
58 mode = None
59 except OSError:
60 mode = None
61 self.createmode = mode
62
63 def join(self, f):
64 return os.path.join(self.path, f)
65
66 def _revlogfiles(self, relpath='', recurse=False):
67 '''yields (filename, size)'''
68 if relpath:
69 path = os.path.join(self.path, relpath)
70 else:
71 path = self.path
72 striplen = len(self.path) + len(os.sep)
73 filetypes = ('.d', '.i')
74 for f, size in _dirwalk(path, recurse):
75 if (len(f) > 2) and f[-2:] in filetypes:
76 yield util.pconvert(f[striplen:]), size
77
78 def _datafiles(self):
79 for x in self._revlogfiles('data', True):
80 yield x
81
82 def walk(self):
83 '''yields (direncoded filename, size)'''
84 # yield data files first
85 for x in self._datafiles():
86 yield x
87 # yield manifest before changelog
88 meta = util.sort(self._revlogfiles())
89 meta.reverse()
90 for x in meta:
91 yield x
92
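The sort-then-reverse in walk() above is what produces the ordering the stream protocol expects: among the top-level revlogs, '00manifest.i' sorts after '00changelog.i', so reversing emits the manifest before the changelog. A tiny illustration (file names assumed):

    meta = ['00changelog.d', '00changelog.i', '00manifest.i']
    meta.sort()
    meta.reverse()   # -> ['00manifest.i', '00changelog.i', '00changelog.d']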
93 class directstore(_store):
94 def __init__(self, path):
95 _store.__init__(self, path)
96 self.encodefn = lambda x: x
97 self.opener = util.opener(self.path)
98 self.opener.createmode = self.createmode
99
100 class encodedstore(_store):
101 def __init__(self, path):
102 _store.__init__(self, os.path.join(path, 'store'))
103 self.encodefn = encodefilename
104 op = util.opener(self.path)
105 op.createmode = self.createmode
106 self.opener = lambda f, *args, **kw: op(self.encodefn(f), *args, **kw)
107
108 def _datafiles(self):
109 for f, size in self._revlogfiles('data', True):
110 yield decodefilename(f), size
111
112 def join(self, f):
113 return os.path.join(self.path, self.encodefn(f))
114
115 def encodefn(requirements):
116 if 'store' not in requirements:
117 return lambda x: x
118 else:
119 return encodefilename
120
121 def store(requirements, path):
122 if 'store' not in requirements:
123 return directstore(path)
124 else:
125 return encodedstore(path)
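A short usage sketch tying the two factories together (repository path hypothetical): a requires file listing 'store' selects the encoded layout under .hg/store, anything else reads and writes .hg directly.

    import store
    s = store.store(['revlogv1', 'store'], '/repos/demo/.hg')
    s.path                   # '/repos/demo/.hg/store'
    s.join('data/Foo.i')     # '/repos/demo/.hg/store/data/_foo.i'

    fn = store.encodefn(['revlogv1'])
    fn('data/Foo.i')         # old layout: name passes through unchanged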
@@ -1,93 +1,51
1 1 # streamclone.py - streaming clone server support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 import os, osutil, stat, util, lock
8 import util, lock
9 9
10 10 # if server supports streaming clone, it advertises "stream"
11 11 # capability with value that is version+flags of repo it is serving.
12 12 # client only streams if it can read that repo format.
13 13
14 def walkrepo(root):
15 '''iterate over metadata files in repository.
16 walk in natural (sorted) order.
17 yields 2-tuples: name of .d or .i file, size of file.'''
18
19 strip_count = len(root) + len(os.sep)
20 def walk(path, recurse):
21 for e, kind, st in osutil.listdir(path, stat=True):
22 pe = os.path.join(path, e)
23 if kind == stat.S_IFDIR:
24 if recurse:
25 for x in walk(pe, True):
26 yield x
27 else:
28 if kind != stat.S_IFREG or len(e) < 2:
29 continue
30 sfx = e[-2:]
31 if sfx in ('.d', '.i'):
32 yield pe[strip_count:], st.st_size
33 # write file data first
34 for x in walk(os.path.join(root, 'data'), True):
35 yield x
36 # write manifest before changelog
37 meta = util.sort(walk(root, False))
38 meta.reverse()
39 for x in meta:
40 yield x
41
42 14 # stream file format is simple.
43 15 #
44 16 # server writes out line that says how many files, how many total
45 17 # bytes. separator is ascii space, byte counts are strings.
46 18 #
47 19 # then for each file:
48 20 #
49 21 # server writes out line that says file name, how many bytes in
50 22 # file. separator is ascii nul, byte count is string.
51 23 #
52 24 # server writes out raw file data.
53 25
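A worked example of the wire format just described (names and sizes invented). The leading '0' is the success status line written by stream_out() below, and '\0' marks the NUL separator between file name and byte count:

    0
    2 153600
    data/foo.i\0131072
    <131072 bytes of raw revlog data>
    00manifest.i\022528
    <22528 bytes of raw revlog data>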
54 26 def stream_out(repo, fileobj, untrusted=False):
55 27 '''stream out all metadata files in repository.
56 28 writes to file-like object, must support write() and optional flush().'''
57 29
58 30 if not repo.ui.configbool('server', 'uncompressed', untrusted=untrusted):
59 31 fileobj.write('1\n')
60 32 return
61 33
62 # get consistent snapshot of repo. lock during scan so lock not
63 # needed while we stream, and commits can happen.
64 repolock = None
65 34 try:
66 try:
67 repolock = repo.lock()
35 entries, total_bytes = repo.storefiles()
68 36 except (lock.LockHeld, lock.LockUnavailable), inst:
69 37 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
70 38 fileobj.write('2\n')
71 39 return
72 40
73 41 fileobj.write('0\n')
74 repo.ui.debug('scanning\n')
75 entries = []
76 total_bytes = 0
77 for name, size in walkrepo(repo.spath):
78 name = repo.decodefn(util.pconvert(name))
79 entries.append((name, size))
80 total_bytes += size
81 finally:
82 del repolock
83
84 42 repo.ui.debug('%d files, %d bytes to transfer\n' %
85 43 (len(entries), total_bytes))
86 44 fileobj.write('%d %d\n' % (len(entries), total_bytes))
87 45 for name, size in entries:
88 46 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
89 47 fileobj.write('%s\0%d\n' % (name, size))
90 48 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
91 49 fileobj.write(chunk)
92 50 flush = getattr(fileobj, 'flush', None)
93 51 if flush: flush()