##// END OF EJS Templates
Clean up some merge logic...
mpm@selenic.com -
r993:6f274afc default
parent child Browse files
Show More
@@ -1,2278 +1,2277 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 14 demandload(globals(), "bisect errno select stat")
15 15
class filelog(revlog):
    """Revlog storing the history of a single tracked file.

    Revision text may carry a metadata header delimited by '\\1\\n'
    markers; read() strips it, readmeta() parses it."""
    def __init__(self, opener, path):
        revlog.__init__(self, opener,
                        os.path.join("data", self.encodedir(path + ".i")),
                        os.path.join("data", self.encodedir(path + ".d")))

    # This avoids a collision between a file named foo and a dir named
    # foo.i or foo.d
    def encodedir(self, path):
        return (path
                .replace(".hg/", ".hg.hg/")
                .replace(".i/", ".i.hg/")
                .replace(".d/", ".d.hg/"))

    def decodedir(self, path):
        # exact inverse of encodedir (replacements applied in reverse order)
        return (path
                .replace(".d.hg/", ".d/")
                .replace(".i.hg/", ".i/")
                .replace(".hg.hg/", ".hg/"))

    def read(self, node):
        """Return the file contents of a revision, minus any metadata."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.find('\1\n', 2)
        return t[s+2:]

    def readmeta(self, node):
        """Return the metadata dict of a revision ({} if it has none)."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            # fixed: previously returned the raw text here, which is not
            # a metadata mapping
            return {}
        s = t.find('\1\n', 2)
        mt = t[2:s]
        m = {}  # fixed: m was never initialized (NameError on any metadata)
        for l in mt.splitlines():
            k, v = l.split(": ", 1)
            m[k] = v
        return m

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision; prepend a metadata header when needed.

        Text that itself starts with '\\1\\n' must be wrapped in an
        (possibly empty) header so read() can unwrap it unambiguously."""
        if meta or text.startswith('\1\n'):
            mt = ""
            if meta:
                mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
            text = "\1\n" + "".join(mt) + "\1\n" + text
        return self.addrevision(text, transaction, link, p1, p2)

    def annotate(self, node):
        """Return [(linkrev, line)] attributing each line of node."""

        def decorate(text, rev):
            # tag every line of text with the same originating rev
            return ([rev] * len(text.splitlines()), text)

        def pair(parent, child):
            # carry parent's line attributions over unchanged blocks
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        # find all ancestors
        needed = {node:1}
        visit = [node]
        while visit:
            n = visit.pop(0)
            for p in self.parents(n):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision which is a topological order
        visit = [ (self.rev(n), n) for n in needed.keys() ]
        visit.sort()
        hist = {}

        for r,n in visit:
            curr = decorate(self.read(n), self.linkrev(n))
            for p in self.parents(n):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[n] = curr

        return zip(hist[n][0], hist[n][1].splitlines(1))
102 102
class manifest(revlog):
    """Revlog of manifests: each revision is a sorted list of
    'file\\0<40-hex-node>[x]\\n' lines mapping names to file nodes,
    with an optional 'x' exec flag."""
    def __init__(self, opener):
        self.mapcache = None    # (node, map, flags) of the last read()
        self.listcache = None   # (text, text.splitlines(1)) of last rev
        self.addlist = None     # line list being assembled by add()
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        """Return the {filename: filenode} map for a manifest node."""
        if node == nullid: return {} # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        flag = {}
        self.listcache = (text, text.splitlines(1))
        for l in self.listcache[1]:
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
            flag[f] = (n[40:-1] == "x")
        self.mapcache = (node, map, flag)
        return map

    def readflags(self, node):
        """Return the {filename: is-executable} map for a manifest node."""
        if node == nullid: return {} # don't upset local cache
        if not self.mapcache or self.mapcache[0] != node:
            self.read(node)
        return self.mapcache[2]

    def diff(self, a, b):
        # this is sneaky, as we're not actually using a and b
        if self.listcache and self.addlist and self.listcache[0] == a:
            d = mdiff.diff(self.listcache[1], self.addlist, 1)
            # paranoia: verify the fast line-based diff round-trips
            if mdiff.patch(a, d) != b:
                sys.stderr.write("*** sortdiff failed, falling back ***\n")
                return mdiff.textdiff(a, b)
            return d
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, flags, transaction, link, p1=None, p2=None,
            changed=None):
        """Add a manifest revision built from map/flags.

        When 'changed' (a (changed-files, removed-files) pair) is given
        and the cache is parented by p1, the new text is produced by
        patching the cached line list instead of rebuilding it."""
        # directly generate the mdiff delta from the data collected during
        # the bisect loop below
        def gendelta(delta):
            i = 0
            result = []
            while i < len(delta):
                start = delta[i][2]
                end = delta[i][3]
                l = delta[i][4]
                if l is None:   # fixed: compare to None with 'is', not '=='
                    l = ""
                # merge adjacent/overlapping hunks into one patch record
                while i < len(delta) - 1 and start <= delta[i+1][2] \
                          and end >= delta[i+1][2]:
                    if delta[i+1][3] > end:
                        end = delta[i+1][3]
                    if delta[i+1][4]:
                        l += delta[i+1][4]
                    i += 1
                result.append(struct.pack(">lll", start, end, len(l)) + l)
                i += 1
            return result

        # apply the changes collected during the bisect loop to our addlist
        def addlistdelta(addlist, delta):
            # apply the deltas to the addlist. start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(delta)
            while i > 0:
                i -= 1
                start = delta[i][0]
                end = delta[i][1]
                if delta[i][4]:
                    addlist[start:end] = [delta[i][4]]
                else:
                    del addlist[start:end]
            return addlist

        # calculate the byte offset of the start of each line in the
        # manifest
        def calcoffsets(addlist):
            offsets = [0] * (len(addlist) + 1)
            offset = 0
            i = 0
            while i < len(addlist):
                offsets[i] = offset
                offset += len(addlist[i])
                i += 1
            offsets[i] = offset
            return offsets

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or \
           self.mapcache[0] != p1:
            files = map.keys()
            files.sort()

            # rebuild the whole manifest text from scratch
            self.addlist = ["%s\000%s%s\n" %
                            (f, hex(map[f]), flags[f] and "x" or '')
                            for f in files]
            cachedelta = None
        else:
            addlist = self.listcache[1]

            # find the starting offset for each line in the add list
            offsets = calcoffsets(addlist)

            # combine the changed lists into one list for sorting
            # (tag 0 = changed/added, tag 1 = removed)
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            bs = 0

            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                bs = bisect.bisect(addlist, f, bs)
                if bs < len(addlist):
                    fn = addlist[bs][:addlist[bs].index('\0')]
                else:
                    fn = None
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]),
                                          flags[f] and "x" or '')
                else:
                    l = None
                start = bs
                if fn != f:
                    # item not found, insert a new one
                    end = bs
                    if w[1] == 1:
                        sys.stderr.write("failed to remove %s from manifest\n"
                                         % f)
                        sys.exit(1)
                else:
                    # item is found, replace/delete the existing line
                    end = bs + 1
                delta.append([start, end, offsets[start], offsets[end], l])

            self.addlist = addlistdelta(addlist, delta)
            if self.mapcache[0] == self.tip():
                # the cached text is the tip: hand the revlog a ready-made
                # delta instead of letting it re-diff
                cachedelta = "".join(gendelta(delta))
            else:
                cachedelta = None

        text = "".join(self.addlist)
        if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
            sys.stderr.write("manifest delta failure\n")
            sys.exit(1)
        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self.mapcache = (n, map, flags)
        self.listcache = (text, self.addlist)
        self.addlist = None

        return n
261 261
class changelog(revlog):
    """Revlog of changesets.

    Each entry is 'manifest-hex\\nuser\\ndate\\nfile...\\n\\ndescription'."""
    def __init__(self, opener):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")

    def extract(self, text):
        """Parse changeset text into (manifest, user, date, files, desc)."""
        if not text:
            # the null changeset
            return (nullid, "", "0", [], "")
        divider = text.index("\n\n")
        desc = text[divider + 2:]
        header = text[:divider].splitlines()
        manifest = bin(header[0])
        user = header[1]
        date = header[2]
        files = header[3:]
        return (manifest, user, date, files, desc)

    def read(self, node):
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None,
            user=None, date=None):
        """Add a changeset; date defaults to now in the local timezone."""
        if not date:
            if time.daylight:
                offset = time.altzone
            else:
                offset = time.timezone
            date = "%d %d" % (time.time(), offset)
        list.sort()
        lines = [hex(manifest), user, date] + list + ["", desc]
        return self.addrevision("\n".join(lines), transaction,
                                self.count(), p1, p2)
291 291
292 292 class dirstate:
293 293 def __init__(self, opener, ui, root):
294 294 self.opener = opener
295 295 self.root = root
296 296 self.dirty = 0
297 297 self.ui = ui
298 298 self.map = None
299 299 self.pl = None
300 300 self.copies = {}
301 301 self.ignorefunc = None
302 302
303 303 def wjoin(self, f):
304 304 return os.path.join(self.root, f)
305 305
306 306 def getcwd(self):
307 307 cwd = os.getcwd()
308 308 if cwd == self.root: return ''
309 309 return cwd[len(self.root) + 1:]
310 310
311 311 def ignore(self, f):
312 312 if not self.ignorefunc:
313 313 bigpat = []
314 314 try:
315 315 l = file(self.wjoin(".hgignore"))
316 316 for pat in l:
317 317 p = pat.rstrip()
318 318 if p:
319 319 try:
320 320 re.compile(p)
321 321 except:
322 322 self.ui.warn("ignoring invalid ignore"
323 323 + " regular expression '%s'\n" % p)
324 324 else:
325 325 bigpat.append(p)
326 326 except IOError: pass
327 327
328 328 if bigpat:
329 329 s = "(?:%s)" % (")|(?:".join(bigpat))
330 330 r = re.compile(s)
331 331 self.ignorefunc = r.search
332 332 else:
333 333 self.ignorefunc = util.never
334 334
335 335 return self.ignorefunc(f)
336 336
337 337 def __del__(self):
338 338 if self.dirty:
339 339 self.write()
340 340
341 341 def __getitem__(self, key):
342 342 try:
343 343 return self.map[key]
344 344 except TypeError:
345 345 self.read()
346 346 return self[key]
347 347
348 348 def __contains__(self, key):
349 349 if not self.map: self.read()
350 350 return key in self.map
351 351
352 352 def parents(self):
353 353 if not self.pl:
354 354 self.read()
355 355 return self.pl
356 356
357 357 def markdirty(self):
358 358 if not self.dirty:
359 359 self.dirty = 1
360 360
361 361 def setparents(self, p1, p2 = nullid):
362 362 self.markdirty()
363 363 self.pl = p1, p2
364 364
365 365 def state(self, key):
366 366 try:
367 367 return self[key][0]
368 368 except KeyError:
369 369 return "?"
370 370
371 371 def read(self):
372 372 if self.map is not None: return self.map
373 373
374 374 self.map = {}
375 375 self.pl = [nullid, nullid]
376 376 try:
377 377 st = self.opener("dirstate").read()
378 378 if not st: return
379 379 except: return
380 380
381 381 self.pl = [st[:20], st[20: 40]]
382 382
383 383 pos = 40
384 384 while pos < len(st):
385 385 e = struct.unpack(">cllll", st[pos:pos+17])
386 386 l = e[4]
387 387 pos += 17
388 388 f = st[pos:pos + l]
389 389 if '\0' in f:
390 390 f, c = f.split('\0')
391 391 self.copies[f] = c
392 392 self.map[f] = e[:4]
393 393 pos += l
394 394
395 395 def copy(self, source, dest):
396 396 self.read()
397 397 self.markdirty()
398 398 self.copies[dest] = source
399 399
400 400 def copied(self, file):
401 401 return self.copies.get(file, None)
402 402
403 403 def update(self, files, state, **kw):
404 404 ''' current states:
405 405 n normal
406 406 m needs merging
407 407 r marked for removal
408 408 a marked for addition'''
409 409
410 410 if not files: return
411 411 self.read()
412 412 self.markdirty()
413 413 for f in files:
414 414 if state == "r":
415 415 self.map[f] = ('r', 0, 0, 0)
416 416 else:
417 417 s = os.stat(os.path.join(self.root, f))
418 418 st_size = kw.get('st_size', s.st_size)
419 419 st_mtime = kw.get('st_mtime', s.st_mtime)
420 420 self.map[f] = (state, s.st_mode, st_size, st_mtime)
421 421
422 422 def forget(self, files):
423 423 if not files: return
424 424 self.read()
425 425 self.markdirty()
426 426 for f in files:
427 427 try:
428 428 del self.map[f]
429 429 except KeyError:
430 430 self.ui.warn("not in dirstate: %s!\n" % f)
431 431 pass
432 432
433 433 def clear(self):
434 434 self.map = {}
435 435 self.markdirty()
436 436
437 437 def write(self):
438 438 st = self.opener("dirstate", "w")
439 439 st.write("".join(self.pl))
440 440 for f, e in self.map.items():
441 441 c = self.copied(f)
442 442 if c:
443 443 f = f + "\0" + c
444 444 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
445 445 st.write(e + f)
446 446 self.dirty = 0
447 447
448 448 def filterfiles(self, files):
449 449 ret = {}
450 450 unknown = []
451 451
452 452 for x in files:
453 453 if x is '.':
454 454 return self.map.copy()
455 455 if x not in self.map:
456 456 unknown.append(x)
457 457 else:
458 458 ret[x] = self.map[x]
459 459
460 460 if not unknown:
461 461 return ret
462 462
463 463 b = self.map.keys()
464 464 b.sort()
465 465 blen = len(b)
466 466
467 467 for x in unknown:
468 468 bs = bisect.bisect(b, x)
469 469 if bs != 0 and b[bs-1] == x:
470 470 ret[x] = self.map[x]
471 471 continue
472 472 while bs < blen:
473 473 s = b[bs]
474 474 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
475 475 ret[s] = self.map[s]
476 476 else:
477 477 break
478 478 bs += 1
479 479 return ret
480 480
481 481 def walk(self, files = None, match = util.always, dc=None):
482 482 self.read()
483 483
484 484 # walk all files by default
485 485 if not files:
486 486 files = [self.root]
487 487 if not dc:
488 488 dc = self.map.copy()
489 489 elif not dc:
490 490 dc = self.filterfiles(files)
491 491
492 492 known = {'.hg': 1}
493 493 def seen(fn):
494 494 if fn in known: return True
495 495 known[fn] = 1
496 496 def traverse():
497 497 for ff in util.unique(files):
498 498 f = os.path.join(self.root, ff)
499 499 try:
500 500 st = os.stat(f)
501 501 except OSError, inst:
502 502 if ff not in dc: self.ui.warn('%s: %s\n' % (
503 503 util.pathto(self.getcwd(), ff),
504 504 inst.strerror))
505 505 continue
506 506 if stat.S_ISDIR(st.st_mode):
507 507 for dir, subdirs, fl in os.walk(f):
508 508 d = dir[len(self.root) + 1:]
509 509 nd = util.normpath(d)
510 510 if nd == '.': nd = ''
511 511 if seen(nd):
512 512 subdirs[:] = []
513 513 continue
514 514 for sd in subdirs:
515 515 ds = os.path.join(nd, sd +'/')
516 516 if self.ignore(ds) or not match(ds):
517 517 subdirs.remove(sd)
518 518 subdirs.sort()
519 519 fl.sort()
520 520 for fn in fl:
521 521 fn = util.pconvert(os.path.join(d, fn))
522 522 yield 'f', fn
523 523 elif stat.S_ISREG(st.st_mode):
524 524 yield 'f', ff
525 525 else:
526 526 kind = 'unknown'
527 527 if stat.S_ISCHR(st.st_mode): kind = 'character device'
528 528 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
529 529 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
530 530 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
531 531 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
532 532 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
533 533 util.pathto(self.getcwd(), ff),
534 534 kind))
535 535
536 536 ks = dc.keys()
537 537 ks.sort()
538 538 for k in ks:
539 539 yield 'm', k
540 540
541 541 # yield only files that match: all in dirstate, others only if
542 542 # not in .hgignore
543 543
544 544 for src, fn in util.unique(traverse()):
545 545 fn = util.normpath(fn)
546 546 if seen(fn): continue
547 547 if fn not in dc and self.ignore(fn):
548 548 continue
549 549 if match(fn):
550 550 yield src, fn
551 551
552 552 def changes(self, files=None, match=util.always):
553 553 self.read()
554 554 if not files:
555 555 dc = self.map.copy()
556 556 else:
557 557 dc = self.filterfiles(files)
558 558 lookup, modified, added, unknown = [], [], [], []
559 559 removed, deleted = [], []
560 560
561 561 for src, fn in self.walk(files, match, dc=dc):
562 562 try:
563 563 s = os.stat(os.path.join(self.root, fn))
564 564 except OSError:
565 565 continue
566 566 if not stat.S_ISREG(s.st_mode):
567 567 continue
568 568 c = dc.get(fn)
569 569 if c:
570 570 del dc[fn]
571 571 if c[0] == 'm':
572 572 modified.append(fn)
573 573 elif c[0] == 'a':
574 574 added.append(fn)
575 575 elif c[0] == 'r':
576 576 unknown.append(fn)
577 577 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
578 578 modified.append(fn)
579 579 elif c[3] != s.st_mtime:
580 580 lookup.append(fn)
581 581 else:
582 582 unknown.append(fn)
583 583
584 584 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
585 585 if c[0] == 'r':
586 586 removed.append(fn)
587 587 else:
588 588 deleted.append(fn)
589 589 return (lookup, modified, added, removed + deleted, unknown)
590 590
# used to avoid circular references so destructors work
def opener(base):
    """Return a file-opening function rooted at base.

    Implemented as a plain closure (not a bound method) so repository
    destructors are not kept alive by reference cycles."""
    root = base

    def o(path, mode="r"):
        # remote repositories are read via HTTP range requests
        if root.startswith("http://"):
            f = os.path.join(root, urllib.quote(path))
            return httprangereader.httprangereader(f)

        f = os.path.join(root, path)

        mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                s = os.stat(f)
            except OSError:
                # writing a new file: make sure its directory exists
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    os.makedirs(d)
            else:
                # break up hardlinks before modifying an existing file
                if s.st_nlink > 1:
                    file(f + ".tmp", "wb").write(file(f, "rb").read())
                    util.rename(f+".tmp", f)

        return file(f, mode)

    return o
618 618
class RepoError(Exception):
    """Raised when a repository cannot be found or accessed."""
620 620
621 621 class localrepository:
    def __init__(self, ui, path=None, create=0):
        """Open (or with create=1, create) the repository at path.

        An http:// path is treated as a read-only remote repository;
        otherwise path defaults to the nearest ancestor of the cwd
        containing a .hg directory.  Raises RepoError if none is found."""
        self.remote = 0
        if path and path.startswith("http://"):
            self.remote = 1
            self.path = path
        else:
            if not path:
                # walk up from the cwd looking for a .hg directory
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    oldp = p
                    p = os.path.dirname(p)
                    if p == oldp: raise RepoError("no repo found")
                path = p
            self.path = os.path.join(path, ".hg")

            if not create and not os.path.isdir(self.path):
                raise RepoError("repository %s not found" % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.opener = opener(self.path)
        self.wopener = opener(self.root)
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        # lazily-built tag caches; see tags() and nodetags()
        self.tagscache = None
        self.nodetagscache = None

        if not self.remote:
            self.dirstate = dirstate(self.opener, ui, self.root)
            try:
                # apply per-repository configuration, if present
                self.ui.readconfig(self.opener("hgrc"))
            except IOError: pass
659 659
660 660 def hook(self, name, **args):
661 661 s = self.ui.config("hooks", name)
662 662 if s:
663 663 self.ui.note("running hook %s: %s\n" % (name, s))
664 664 old = {}
665 665 for k, v in args.items():
666 666 k = k.upper()
667 667 old[k] = os.environ.get(k, None)
668 668 os.environ[k] = v
669 669
670 670 r = os.system(s)
671 671
672 672 for k, v in old.items():
673 673 if v != None:
674 674 os.environ[k] = v
675 675 else:
676 676 del os.environ[k]
677 677
678 678 if r:
679 679 self.ui.warn("abort: %s hook failed with status %d!\n" %
680 680 (name, r))
681 681 return False
682 682 return True
683 683
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            # NOTE: plain nested function that takes self explicitly;
            # always called as addtag(self, ...) below
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    # unparsable node: record the tag with an empty node
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.revision(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # local tags override revision-controlled ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always defined
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
721 721
722 722 def tagslist(self):
723 723 '''return a list of tags ordered by revision'''
724 724 l = []
725 725 for t, n in self.tags().items():
726 726 try:
727 727 r = self.changelog.rev(n)
728 728 except:
729 729 r = -2 # sort to the beginning of the list if unknown
730 730 l.append((r,t,n))
731 731 l.sort()
732 732 return [(t,n) for r,t,n in l]
733 733
734 734 def nodetags(self, node):
735 735 '''return the tags associated with a node'''
736 736 if not self.nodetagscache:
737 737 self.nodetagscache = {}
738 738 for t,n in self.tags().items():
739 739 self.nodetagscache.setdefault(n,[]).append(t)
740 740 return self.nodetagscache.get(node, [])
741 741
    def lookup(self, key):
        """Resolve key (a tag name, or anything the changelog can look
        up) to a changeset node; raise RepoError if it is unknown."""
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # any failure mode maps to "unknown revision"
                raise RepoError("unknown revision '%s'" % key)
750 750
751 751 def dev(self):
752 752 if self.remote: return -1
753 753 return os.stat(self.path).st_dev
754 754
755 755 def local(self):
756 756 return not self.remote
757 757
758 758 def join(self, f):
759 759 return os.path.join(self.path, f)
760 760
761 761 def wjoin(self, f):
762 762 return os.path.join(self.root, f)
763 763
764 764 def file(self, f):
765 765 if f[0] == '/': f = f[1:]
766 766 return filelog(self.opener, f)
767 767
768 768 def getcwd(self):
769 769 return self.dirstate.getcwd()
770 770
771 771 def wfile(self, f, mode='r'):
772 772 return self.wopener(f, mode)
773 773
    def transaction(self):
        """Start a transaction and return the transaction object.

        The current dirstate is journaled first so undo() can restore
        it; on successful close the journal files become undo files."""
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # keep the completed journal around as undo information
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
789 789
    def recover(self):
        """Roll back an interrupted transaction, if one left a journal."""
        # the lock is released when the local variable is reclaimed
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status("rolling back interrupted transaction\n")
            return transaction.rollback(self.opener, self.join("journal"))
        else:
            self.ui.warn("no interrupted transaction available\n")
797 797
    def undo(self):
        """Undo the last transaction, restoring repository and dirstate."""
        # the lock is released when the local variable is reclaimed
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status("rolling back last transaction\n")
            transaction.rollback(self.opener, self.join("undo"))
            # drop the old dirstate object before replacing its file,
            # then reload from the restored copy
            self.dirstate = None
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate = dirstate(self.opener, self.ui, self.root)
        else:
            self.ui.warn("no undo information available\n")
808 808
    def lock(self, wait = 1):
        """Acquire the repository lock and return the lock object.

        First tries a non-blocking acquire; if the lock is held and
        wait is true, warns and retries (passing wait through to
        lock.lock), otherwise re-raises LockHeld."""
        try:
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
817 817
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit the given files with explicit parents, bypassing the
        usual working-dir checks (used for imports and rollups).

        The dirstate is only updated when p1 is the current first
        parent.  Files that cannot be read are treated as removed."""
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wfile(f).read()
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable file: drop it from the new manifest
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
887 887
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """Commit working-directory changes.

        Returns the new changeset node, or None if nothing changed, a
        hook refused the commit, or the user supplied an empty message."""
        commit = []
        remove = []
        changed = []

        if files:
            # explicit file list: split into commits and removals
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn("%s not tracked!\n" % f)
        else:
            # no list given: commit everything changed in the working dir
            (c, a, d, u) = self.changes(match = match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is committed even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status("nothing changed\n")
            return None

        if not self.hook("precommit"):
            return None

        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wfile(f).read()
            except IOError:
                self.ui.warn("trouble committing %s!\n" % f)
                raise

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source as filelog metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))

            r = self.file(f)
            fp1 = m1.get(f, nullid)
            fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # no message supplied: open an editor with a summary template
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                # an empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
1010 1010
1011 1011 def walk(self, node = None, files = [], match = util.always):
1012 1012 if node:
1013 1013 for fn in self.manifest.read(self.changelog.read(node)[0]):
1014 1014 if match(fn): yield 'm', fn
1015 1015 else:
1016 1016 for src, fn in self.dirstate.walk(files, match):
1017 1017 yield src, fn
1018 1018
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Return (changed, added, deleted, unknown) file lists between
        node1 and node2, either of which defaults to the working dir."""
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare the working-dir file against its manifest revision
            t1 = self.wfile(fn).read()
            t2 = self.file(fn).revision(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            # changed/lookup/added files stand in with an empty node,
            # deleted ones are dropped
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    # "" marks a working-dir file: fall back to contents
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever is left in mf1 does not exist in mf2: deleted
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
1089 1089
1090 1090 def add(self, list):
1091 1091 for f in list:
1092 1092 p = self.wjoin(f)
1093 1093 if not os.path.exists(p):
1094 1094 self.ui.warn("%s does not exist!\n" % f)
1095 1095 elif not os.path.isfile(p):
1096 1096 self.ui.warn("%s not added: only files supported currently\n" % f)
1097 1097 elif self.dirstate.state(f) in 'an':
1098 1098 self.ui.warn("%s already tracked!\n" % f)
1099 1099 else:
1100 1100 self.dirstate.update([f], "a")
1101 1101
1102 1102 def forget(self, list):
1103 1103 for f in list:
1104 1104 if self.dirstate.state(f) not in 'ai':
1105 1105 self.ui.warn("%s not added!\n" % f)
1106 1106 else:
1107 1107 self.dirstate.forget([f])
1108 1108
1109 1109 def remove(self, list):
1110 1110 for f in list:
1111 1111 p = self.wjoin(f)
1112 1112 if os.path.exists(p):
1113 1113 self.ui.warn("%s still exists!\n" % f)
1114 1114 elif self.dirstate.state(f) == 'a':
1115 1115 self.ui.warn("%s never committed!\n" % f)
1116 1116 self.dirstate.forget([f])
1117 1117 elif f not in self.dirstate:
1118 1118 self.ui.warn("%s not tracked!\n" % f)
1119 1119 else:
1120 1120 self.dirstate.update([f], "r")
1121 1121
1122 1122 def copy(self, source, dest):
1123 1123 p = self.wjoin(dest)
1124 1124 if not os.path.exists(p):
1125 1125 self.ui.warn("%s does not exist!\n" % dest)
1126 1126 elif not os.path.isfile(p):
1127 1127 self.ui.warn("copy failed: %s is not a file\n" % dest)
1128 1128 else:
1129 1129 if self.dirstate.state(dest) == '?':
1130 1130 self.dirstate.update([dest], "a")
1131 1131 self.dirstate.copy(source, dest)
1132 1132
1133 1133 def heads(self):
1134 1134 return self.changelog.heads()
1135 1135
1136 1136 # branchlookup returns a dict giving a list of branches for
1137 1137 # each head. A branch is defined as the tag of a node or
1138 1138 # the branch of the node's parents. If a node has multiple
1139 1139 # branch tags, tags are eliminated if they are visible from other
1140 1140 # branch tags.
1141 1141 #
1142 1142 # So, for this graph: a->b->c->d->e
1143 1143 # \ /
1144 1144 # aa -----/
1145 1145 # a has tag 2.6.12
1146 1146 # d has tag 2.6.13
1147 1147 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1148 1148 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1149 1149 # from the list.
1150 1150 #
1151 1151 # It is possible that more than one head will have the same branch tag.
1152 1152 # callers need to check the result for multiple heads under the same
1153 1153 # branch tag if that is a problem for them (ie checkout of a specific
1154 1154 # branch).
1155 1155 #
1156 1156 # passing in a specific branch will limit the depth of the search
1157 1157 # through the parents. It won't limit the branches returned in the
1158 1158 # result though.
1159 1159 def branchlookup(self, heads=None, branch=None):
1160 1160 if not heads:
1161 1161 heads = self.heads()
1162 1162 headt = [ h for h in heads ]
1163 1163 chlog = self.changelog
1164 1164 branches = {}
1165 1165 merges = []
1166 1166 seenmerge = {}
1167 1167
1168 1168 # traverse the tree once for each head, recording in the branches
1169 1169 # dict which tags are visible from this head. The branches
1170 1170 # dict also records which tags are visible from each tag
1171 1171 # while we traverse.
1172 1172 while headt or merges:
1173 1173 if merges:
1174 1174 n, found = merges.pop()
1175 1175 visit = [n]
1176 1176 else:
1177 1177 h = headt.pop()
1178 1178 visit = [h]
1179 1179 found = [h]
1180 1180 seen = {}
1181 1181 while visit:
1182 1182 n = visit.pop()
1183 1183 if n in seen:
1184 1184 continue
1185 1185 pp = chlog.parents(n)
1186 1186 tags = self.nodetags(n)
1187 1187 if tags:
1188 1188 for x in tags:
1189 1189 if x == 'tip':
1190 1190 continue
1191 1191 for f in found:
1192 1192 branches.setdefault(f, {})[n] = 1
1193 1193 branches.setdefault(n, {})[n] = 1
1194 1194 break
1195 1195 if n not in found:
1196 1196 found.append(n)
1197 1197 if branch in tags:
1198 1198 continue
1199 1199 seen[n] = 1
1200 1200 if pp[1] != nullid and n not in seenmerge:
1201 1201 merges.append((pp[1], [x for x in found]))
1202 1202 seenmerge[n] = 1
1203 1203 if pp[0] != nullid:
1204 1204 visit.append(pp[0])
1205 1205 # traverse the branches dict, eliminating branch tags from each
1206 1206 # head that are visible from another branch tag for that head.
1207 1207 out = {}
1208 1208 viscache = {}
1209 1209 for h in heads:
1210 1210 def visible(node):
1211 1211 if node in viscache:
1212 1212 return viscache[node]
1213 1213 ret = {}
1214 1214 visit = [node]
1215 1215 while visit:
1216 1216 x = visit.pop()
1217 1217 if x in viscache:
1218 1218 ret.update(viscache[x])
1219 1219 elif x not in ret:
1220 1220 ret[x] = 1
1221 1221 if x in branches:
1222 1222 visit[len(visit):] = branches[x].keys()
1223 1223 viscache[node] = ret
1224 1224 return ret
1225 1225 if h not in branches:
1226 1226 continue
1227 1227 # O(n^2), but somewhat limited. This only searches the
1228 1228 # tags visible from a specific head, not all the tags in the
1229 1229 # whole repo.
1230 1230 for b in branches[h]:
1231 1231 vis = False
1232 1232 for bb in branches[h].keys():
1233 1233 if b != bb:
1234 1234 if b in visible(bb):
1235 1235 vis = True
1236 1236 break
1237 1237 if not vis:
1238 1238 l = out.setdefault(h, [])
1239 1239 l[len(l):] = self.nodetags(b)
1240 1240 return out
1241 1241
1242 1242 def branches(self, nodes):
1243 1243 if not nodes: nodes = [self.changelog.tip()]
1244 1244 b = []
1245 1245 for n in nodes:
1246 1246 t = n
1247 1247 while n:
1248 1248 p = self.changelog.parents(n)
1249 1249 if p[1] != nullid or p[0] == nullid:
1250 1250 b.append((t, n, p[0], p[1]))
1251 1251 break
1252 1252 n = p[0]
1253 1253 return b
1254 1254
1255 1255 def between(self, pairs):
1256 1256 r = []
1257 1257
1258 1258 for top, bottom in pairs:
1259 1259 n, l, i = top, [], 0
1260 1260 f = 1
1261 1261
1262 1262 while n != bottom:
1263 1263 p = self.changelog.parents(n)[0]
1264 1264 if i == f:
1265 1265 l.append(n)
1266 1266 f = f * 2
1267 1267 n = p
1268 1268 i += 1
1269 1269
1270 1270 r.append(l)
1271 1271
1272 1272 return r
1273 1273
1274 1274 def newer(self, nodes):
1275 1275 m = {}
1276 1276 nl = []
1277 1277 pm = {}
1278 1278 cl = self.changelog
1279 1279 t = l = cl.count()
1280 1280
1281 1281 # find the lowest numbered node
1282 1282 for n in nodes:
1283 1283 l = min(l, cl.rev(n))
1284 1284 m[n] = 1
1285 1285
1286 1286 for i in xrange(l, t):
1287 1287 n = cl.node(i)
1288 1288 if n in m: # explicitly listed
1289 1289 pm[n] = 1
1290 1290 nl.append(n)
1291 1291 continue
1292 1292 for p in cl.parents(n):
1293 1293 if p in pm: # parent listed
1294 1294 pm[n] = 1
1295 1295 nl.append(n)
1296 1296 break
1297 1297
1298 1298 return nl
1299 1299
1300 1300 def findincoming(self, remote, base=None, heads=None):
1301 1301 m = self.changelog.nodemap
1302 1302 search = []
1303 1303 fetch = []
1304 1304 seen = {}
1305 1305 seenbranch = {}
1306 1306 if base == None:
1307 1307 base = {}
1308 1308
1309 1309 # assume we're closer to the tip than the root
1310 1310 # and start by examining the heads
1311 1311 self.ui.status("searching for changes\n")
1312 1312
1313 1313 if not heads:
1314 1314 heads = remote.heads()
1315 1315
1316 1316 unknown = []
1317 1317 for h in heads:
1318 1318 if h not in m:
1319 1319 unknown.append(h)
1320 1320 else:
1321 1321 base[h] = 1
1322 1322
1323 1323 if not unknown:
1324 1324 return None
1325 1325
1326 1326 rep = {}
1327 1327 reqcnt = 0
1328 1328
1329 1329 # search through remote branches
1330 1330 # a 'branch' here is a linear segment of history, with four parts:
1331 1331 # head, root, first parent, second parent
1332 1332 # (a branch always has two parents (or none) by definition)
1333 1333 unknown = remote.branches(unknown)
1334 1334 while unknown:
1335 1335 r = []
1336 1336 while unknown:
1337 1337 n = unknown.pop(0)
1338 1338 if n[0] in seen:
1339 1339 continue
1340 1340
1341 1341 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1342 1342 if n[0] == nullid:
1343 1343 break
1344 1344 if n in seenbranch:
1345 1345 self.ui.debug("branch already found\n")
1346 1346 continue
1347 1347 if n[1] and n[1] in m: # do we know the base?
1348 1348 self.ui.debug("found incomplete branch %s:%s\n"
1349 1349 % (short(n[0]), short(n[1])))
1350 1350 search.append(n) # schedule branch range for scanning
1351 1351 seenbranch[n] = 1
1352 1352 else:
1353 1353 if n[1] not in seen and n[1] not in fetch:
1354 1354 if n[2] in m and n[3] in m:
1355 1355 self.ui.debug("found new changeset %s\n" %
1356 1356 short(n[1]))
1357 1357 fetch.append(n[1]) # earliest unknown
1358 1358 base[n[2]] = 1 # latest known
1359 1359 continue
1360 1360
1361 1361 for a in n[2:4]:
1362 1362 if a not in rep:
1363 1363 r.append(a)
1364 1364 rep[a] = 1
1365 1365
1366 1366 seen[n[0]] = 1
1367 1367
1368 1368 if r:
1369 1369 reqcnt += 1
1370 1370 self.ui.debug("request %d: %s\n" %
1371 1371 (reqcnt, " ".join(map(short, r))))
1372 1372 for p in range(0, len(r), 10):
1373 1373 for b in remote.branches(r[p:p+10]):
1374 1374 self.ui.debug("received %s:%s\n" %
1375 1375 (short(b[0]), short(b[1])))
1376 1376 if b[0] not in m and b[0] not in seen:
1377 1377 unknown.append(b)
1378 1378
1379 1379 # do binary search on the branches we found
1380 1380 while search:
1381 1381 n = search.pop(0)
1382 1382 reqcnt += 1
1383 1383 l = remote.between([(n[0], n[1])])[0]
1384 1384 l.append(n[1])
1385 1385 p = n[0]
1386 1386 f = 1
1387 1387 for i in l:
1388 1388 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1389 1389 if i in m:
1390 1390 if f <= 2:
1391 1391 self.ui.debug("found new branch changeset %s\n" %
1392 1392 short(p))
1393 1393 fetch.append(p)
1394 1394 base[i] = 1
1395 1395 else:
1396 1396 self.ui.debug("narrowed branch search to %s:%s\n"
1397 1397 % (short(p), short(i)))
1398 1398 search.append((p, i))
1399 1399 break
1400 1400 p, f = i, f * 2
1401 1401
1402 1402 # sanity check our fetch list
1403 1403 for f in fetch:
1404 1404 if f in m:
1405 1405 raise RepoError("already have changeset " + short(f[:4]))
1406 1406
1407 1407 if base.keys() == [nullid]:
1408 1408 self.ui.warn("warning: pulling from an unrelated repository!\n")
1409 1409
1410 1410 self.ui.note("adding new changesets starting at " +
1411 1411 " ".join([short(f) for f in fetch]) + "\n")
1412 1412
1413 1413 self.ui.debug("%d total queries\n" % reqcnt)
1414 1414
1415 1415 return fetch
1416 1416
1417 1417 def findoutgoing(self, remote, base=None, heads=None):
1418 1418 if base == None:
1419 1419 base = {}
1420 1420 self.findincoming(remote, base, heads)
1421 1421
1422 1422 remain = dict.fromkeys(self.changelog.nodemap)
1423 1423
1424 1424 # prune everything remote has from the tree
1425 1425 del remain[nullid]
1426 1426 remove = base.keys()
1427 1427 while remove:
1428 1428 n = remove.pop(0)
1429 1429 if n in remain:
1430 1430 del remain[n]
1431 1431 for p in self.changelog.parents(n):
1432 1432 remove.append(p)
1433 1433
1434 1434 # find every node whose parents have been pruned
1435 1435 subset = []
1436 1436 for n in remain:
1437 1437 p1, p2 = self.changelog.parents(n)
1438 1438 if p1 not in remain and p2 not in remain:
1439 1439 subset.append(n)
1440 1440
1441 1441 # this is the set of all roots we have to push
1442 1442 return subset
1443 1443
1444 1444 def pull(self, remote):
1445 1445 lock = self.lock()
1446 1446
1447 1447 # if we have an empty repo, fetch everything
1448 1448 if self.changelog.tip() == nullid:
1449 1449 self.ui.status("requesting all changes\n")
1450 1450 fetch = [nullid]
1451 1451 else:
1452 1452 fetch = self.findincoming(remote)
1453 1453
1454 1454 if not fetch:
1455 1455 self.ui.status("no changes found\n")
1456 1456 return 1
1457 1457
1458 1458 cg = remote.changegroup(fetch)
1459 1459 return self.addchangegroup(cg)
1460 1460
1461 1461 def push(self, remote, force=False):
1462 1462 lock = remote.lock()
1463 1463
1464 1464 base = {}
1465 1465 heads = remote.heads()
1466 1466 inc = self.findincoming(remote, base, heads)
1467 1467 if not force and inc:
1468 1468 self.ui.warn("abort: unsynced remote changes!\n")
1469 1469 self.ui.status("(did you forget to sync? use push -f to force)\n")
1470 1470 return 1
1471 1471
1472 1472 update = self.findoutgoing(remote, base)
1473 1473 if not update:
1474 1474 self.ui.status("no changes found\n")
1475 1475 return 1
1476 1476 elif not force:
1477 1477 if len(heads) < len(self.changelog.heads()):
1478 1478 self.ui.warn("abort: push creates new remote branches!\n")
1479 1479 self.ui.status("(did you forget to merge?" +
1480 1480 " use push -f to force)\n")
1481 1481 return 1
1482 1482
1483 1483 cg = self.changegroup(update)
1484 1484 return remote.addchangegroup(cg)
1485 1485
1486 1486 def changegroup(self, basenodes):
1487 1487 class genread:
1488 1488 def __init__(self, generator):
1489 1489 self.g = generator
1490 1490 self.buf = ""
1491 1491 def fillbuf(self):
1492 1492 self.buf += "".join(self.g)
1493 1493
1494 1494 def read(self, l):
1495 1495 while l > len(self.buf):
1496 1496 try:
1497 1497 self.buf += self.g.next()
1498 1498 except StopIteration:
1499 1499 break
1500 1500 d, self.buf = self.buf[:l], self.buf[l:]
1501 1501 return d
1502 1502
1503 1503 def gengroup():
1504 1504 nodes = self.newer(basenodes)
1505 1505
1506 1506 # construct the link map
1507 1507 linkmap = {}
1508 1508 for n in nodes:
1509 1509 linkmap[self.changelog.rev(n)] = n
1510 1510
1511 1511 # construct a list of all changed files
1512 1512 changed = {}
1513 1513 for n in nodes:
1514 1514 c = self.changelog.read(n)
1515 1515 for f in c[3]:
1516 1516 changed[f] = 1
1517 1517 changed = changed.keys()
1518 1518 changed.sort()
1519 1519
1520 1520 # the changegroup is changesets + manifests + all file revs
1521 1521 revs = [ self.changelog.rev(n) for n in nodes ]
1522 1522
1523 1523 for y in self.changelog.group(linkmap): yield y
1524 1524 for y in self.manifest.group(linkmap): yield y
1525 1525 for f in changed:
1526 1526 yield struct.pack(">l", len(f) + 4) + f
1527 1527 g = self.file(f).group(linkmap)
1528 1528 for y in g:
1529 1529 yield y
1530 1530
1531 1531 yield struct.pack(">l", 0)
1532 1532
1533 1533 return genread(gengroup())
1534 1534
1535 1535 def addchangegroup(self, source):
1536 1536
1537 1537 def getchunk():
1538 1538 d = source.read(4)
1539 1539 if not d: return ""
1540 1540 l = struct.unpack(">l", d)[0]
1541 1541 if l <= 4: return ""
1542 1542 return source.read(l - 4)
1543 1543
1544 1544 def getgroup():
1545 1545 while 1:
1546 1546 c = getchunk()
1547 1547 if not c: break
1548 1548 yield c
1549 1549
1550 1550 def csmap(x):
1551 1551 self.ui.debug("add changeset %s\n" % short(x))
1552 1552 return self.changelog.count()
1553 1553
1554 1554 def revmap(x):
1555 1555 return self.changelog.rev(x)
1556 1556
1557 1557 if not source: return
1558 1558 changesets = files = revisions = 0
1559 1559
1560 1560 tr = self.transaction()
1561 1561
1562 1562 # pull off the changeset group
1563 1563 self.ui.status("adding changesets\n")
1564 1564 co = self.changelog.tip()
1565 1565 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1566 1566 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1567 1567
1568 1568 # pull off the manifest group
1569 1569 self.ui.status("adding manifests\n")
1570 1570 mm = self.manifest.tip()
1571 1571 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1572 1572
1573 1573 # process the files
1574 1574 self.ui.status("adding file changes\n")
1575 1575 while 1:
1576 1576 f = getchunk()
1577 1577 if not f: break
1578 1578 self.ui.debug("adding %s revisions\n" % f)
1579 1579 fl = self.file(f)
1580 1580 o = fl.count()
1581 1581 n = fl.addgroup(getgroup(), revmap, tr)
1582 1582 revisions += fl.count() - o
1583 1583 files += 1
1584 1584
1585 1585 self.ui.status(("added %d changesets" +
1586 1586 " with %d changes to %d files\n")
1587 1587 % (changesets, revisions, files))
1588 1588
1589 1589 tr.close()
1590 1590
1591 1591 if not self.hook("changegroup"):
1592 1592 return 1
1593 1593
1594 1594 return
1595 1595
1596 1596 def update(self, node, allow=False, force=False, choose=None,
1597 1597 moddirstate=True):
1598 1598 pl = self.dirstate.parents()
1599 1599 if not force and pl[1] != nullid:
1600 1600 self.ui.warn("aborting: outstanding uncommitted merges\n")
1601 1601 return 1
1602 1602
1603 1603 p1, p2 = pl[0], node
1604 1604 pa = self.changelog.ancestor(p1, p2)
1605 1605 m1n = self.changelog.read(p1)[0]
1606 1606 m2n = self.changelog.read(p2)[0]
1607 1607 man = self.manifest.ancestor(m1n, m2n)
1608 1608 m1 = self.manifest.read(m1n)
1609 1609 mf1 = self.manifest.readflags(m1n)
1610 1610 m2 = self.manifest.read(m2n)
1611 1611 mf2 = self.manifest.readflags(m2n)
1612 1612 ma = self.manifest.read(man)
1613 1613 mfa = self.manifest.readflags(man)
1614 1614
1615 1615 (c, a, d, u) = self.changes()
1616 1616
1617 1617 # is this a jump, or a merge? i.e. is there a linear path
1618 1618 # from p1 to p2?
1619 1619 linear_path = (pa == p1 or pa == p2)
1620 1620
1621 1621 # resolve the manifest to determine which files
1622 1622 # we care about merging
1623 1623 self.ui.note("resolving manifests\n")
1624 1624 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1625 1625 (force, allow, moddirstate, linear_path))
1626 1626 self.ui.debug(" ancestor %s local %s remote %s\n" %
1627 1627 (short(man), short(m1n), short(m2n)))
1628 1628
1629 1629 merge = {}
1630 1630 get = {}
1631 1631 remove = []
1632 1632
1633 1633 # construct a working dir manifest
1634 1634 mw = m1.copy()
1635 1635 mfw = mf1.copy()
1636 1636 umap = dict.fromkeys(u)
1637 1637
1638 1638 for f in a + c + u:
1639 1639 mw[f] = ""
1640 1640 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1641 1641
1642 1642 for f in d:
1643 1643 if f in mw: del mw[f]
1644 1644
1645 1645 # If we're jumping between revisions (as opposed to merging),
1646 1646 # and if neither the working directory nor the target rev has
1647 1647 # the file, then we need to remove it from the dirstate, to
1648 1648 # prevent the dirstate from listing the file when it is no
1649 1649 # longer in the manifest.
1650 1650 if moddirstate and linear_path and f not in m2:
1651 1651 self.dirstate.forget((f,))
1652 1652
1653 1653 # Compare manifests
1654 1654 for f, n in mw.iteritems():
1655 1655 if choose and not choose(f): continue
1656 1656 if f in m2:
1657 1657 s = 0
1658 1658
1659 1659 # is the wfile new since m1, and match m2?
1660 1660 if f not in m1:
1661 1661 t1 = self.wfile(f).read()
1662 1662 t2 = self.file(f).revision(m2[f])
1663 1663 if cmp(t1, t2) == 0:
1664 1664 n = m2[f]
1665 1665 del t1, t2
1666 1666
1667 1667 # are files different?
1668 1668 if n != m2[f]:
1669 1669 a = ma.get(f, nullid)
1670 1670 # are both different from the ancestor?
1671 1671 if n != a and m2[f] != a:
1672 1672 self.ui.debug(" %s versions differ, resolve\n" % f)
1673 1673 # merge executable bits
1674 1674 # "if we changed or they changed, change in merge"
1675 1675 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1676 1676 mode = ((a^b) | (a^c)) ^ a
1677 1677 merge[f] = (m1.get(f, nullid), m2[f], mode)
1678 1678 s = 1
1679 1679 # are we clobbering?
1680 1680 # is remote's version newer?
1681 1681 # or are we going back in time?
1682 1682 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1683 1683 self.ui.debug(" remote %s is newer, get\n" % f)
1684 1684 get[f] = m2[f]
1685 1685 s = 1
1686 1686 elif f in umap:
1687 1687 # this unknown file is the same as the checkout
1688 1688 get[f] = m2[f]
1689 1689
1690 1690 if not s and mfw[f] != mf2[f]:
1691 1691 if force:
1692 1692 self.ui.debug(" updating permissions for %s\n" % f)
1693 1693 util.set_exec(self.wjoin(f), mf2[f])
1694 1694 else:
1695 1695 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1696 1696 mode = ((a^b) | (a^c)) ^ a
1697 1697 if mode != b:
1698 1698 self.ui.debug(" updating permissions for %s\n" % f)
1699 1699 util.set_exec(self.wjoin(f), mode)
1700 1700 del m2[f]
1701 1701 elif f in ma:
1702 1702 if n != ma[f]:
1703 1703 r = "d"
1704 1704 if not force and (linear_path or allow):
1705 1705 r = self.ui.prompt(
1706 1706 (" local changed %s which remote deleted\n" % f) +
1707 1707 "(k)eep or (d)elete?", "[kd]", "k")
1708 1708 if r == "d":
1709 1709 remove.append(f)
1710 1710 else:
1711 1711 self.ui.debug("other deleted %s\n" % f)
1712 1712 remove.append(f) # other deleted it
1713 1713 else:
1714 1714 if n == m1.get(f, nullid): # same as parent
1715 1715 if p2 == pa: # going backwards?
1716 1716 self.ui.debug("remote deleted %s\n" % f)
1717 1717 remove.append(f)
1718 1718 else:
1719 1719 self.ui.debug("local created %s, keeping\n" % f)
1720 1720 else:
1721 1721 self.ui.debug("working dir created %s, keeping\n" % f)
1722 1722
1723 1723 for f, n in m2.iteritems():
1724 1724 if choose and not choose(f): continue
1725 1725 if f[0] == "/": continue
1726 1726 if f in ma and n != ma[f]:
1727 1727 r = "k"
1728 1728 if not force and (linear_path or allow):
1729 1729 r = self.ui.prompt(
1730 1730 ("remote changed %s which local deleted\n" % f) +
1731 1731 "(k)eep or (d)elete?", "[kd]", "k")
1732 1732 if r == "k": get[f] = n
1733 1733 elif f not in ma:
1734 1734 self.ui.debug("remote created %s\n" % f)
1735 1735 get[f] = n
1736 1736 else:
1737 1737 if force or p2 == pa: # going backwards?
1738 1738 self.ui.debug("local deleted %s, recreating\n" % f)
1739 1739 get[f] = n
1740 1740 else:
1741 1741 self.ui.debug("local deleted %s\n" % f)
1742 1742
1743 1743 del mw, m1, m2, ma
1744 1744
1745 1745 if force:
1746 1746 for f in merge:
1747 1747 get[f] = merge[f][1]
1748 1748 merge = {}
1749 1749
1750 1750 if linear_path or force:
1751 1751 # we don't need to do any magic, just jump to the new rev
1752 mode = 'n'
1752 branch_merge = False
1753 1753 p1, p2 = p2, nullid
1754 1754 else:
1755 1755 if not allow:
1756 1756 self.ui.status("this update spans a branch" +
1757 1757 " affecting the following files:\n")
1758 1758 fl = merge.keys() + get.keys()
1759 1759 fl.sort()
1760 1760 for f in fl:
1761 1761 cf = ""
1762 1762 if f in merge: cf = " (resolve)"
1763 1763 self.ui.status(" %s%s\n" % (f, cf))
1764 1764 self.ui.warn("aborting update spanning branches!\n")
1765 1765 self.ui.status("(use update -m to merge across branches" +
1766 1766 " or -C to lose changes)\n")
1767 1767 return 1
1768 mode = 'm'
1768 branch_merge = True
1769 1769
1770 1770 if moddirstate:
1771 1771 self.dirstate.setparents(p1, p2)
1772 1772
1773 1773 # get the files we don't need to change
1774 1774 files = get.keys()
1775 1775 files.sort()
1776 1776 for f in files:
1777 1777 if f[0] == "/": continue
1778 1778 self.ui.note("getting %s\n" % f)
1779 1779 t = self.file(f).read(get[f])
1780 1780 try:
1781 1781 self.wfile(f, "w").write(t)
1782 1782 except IOError:
1783 1783 os.makedirs(os.path.dirname(self.wjoin(f)))
1784 1784 self.wfile(f, "w").write(t)
1785 1785 util.set_exec(self.wjoin(f), mf2[f])
1786 1786 if moddirstate:
1787 if mode == 'm':
1788 self.dirstate.update([f], 'n', st_mtime=0)
1787 if branch_merge:
1788 self.dirstate.update([f], 'n', st_mtime=-1)
1789 1789 else:
1790 1790 self.dirstate.update([f], 'n')
1791 1791
1792 1792 # merge the tricky bits
1793 1793 files = merge.keys()
1794 1794 files.sort()
1795 1795 for f in files:
1796 1796 self.ui.status("merging %s\n" % f)
1797 m, o, flag = merge[f]
1798 self.merge3(f, m, o)
1797 my, other, flag = merge[f]
1798 self.merge3(f, my, other)
1799 1799 util.set_exec(self.wjoin(f), flag)
1800 1800 if moddirstate:
1801 if mode == 'm':
1802 # only update dirstate on branch merge, otherwise we
1803 # could mark files with changes as unchanged
1804 self.dirstate.update([f], mode)
1805 elif p2 == nullid:
1806 # update dirstate from parent1's manifest
1807 m1n = self.changelog.read(p1)[0]
1808 m1 = self.manifest.read(m1n)
1809 f_len = len(self.file(f).read(m1[f]))
1810 self.dirstate.update([f], mode, st_size=f_len, st_mtime=0)
1801 if branch_merge:
1802 # We've done a branch merge, mark this file as merged
1803 # so that we properly record the merger later
1804 self.dirstate.update([f], 'm')
1811 1805 else:
1812 self.ui.warn("Second parent without branch merge!?\n"
1813 "Dirstate for file %s may be wrong.\n" % f)
1806 # We've update-merged a locally modified file, so
1807 # we set the dirstate to emulate a normal checkout
1808 # of that file some time in the past. Thus our
1809 # merge will appear as a normal local file
1810 # modification.
1811 f_len = len(self.file(f).read(other))
1812 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1814 1813
1815 1814 remove.sort()
1816 1815 for f in remove:
1817 1816 self.ui.note("removing %s\n" % f)
1818 1817 try:
1819 1818 os.unlink(self.wjoin(f))
1820 1819 except OSError, inst:
1821 1820 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1822 1821 # try removing directories that might now be empty
1823 1822 try: os.removedirs(os.path.dirname(self.wjoin(f)))
1824 1823 except: pass
1825 1824 if moddirstate:
1826 if mode == 'n':
1825 if branch_merge:
1826 self.dirstate.update(remove, 'r')
1827 else:
1827 1828 self.dirstate.forget(remove)
1828 else:
1829 self.dirstate.update(remove, 'r')
1830 1829
1831 1830 def merge3(self, fn, my, other):
1832 1831 """perform a 3-way merge in the working directory"""
1833 1832
1834 1833 def temp(prefix, node):
1835 1834 pre = "%s~%s." % (os.path.basename(fn), prefix)
1836 1835 (fd, name) = tempfile.mkstemp("", pre)
1837 1836 f = os.fdopen(fd, "wb")
1838 1837 f.write(fl.revision(node))
1839 1838 f.close()
1840 1839 return name
1841 1840
1842 1841 fl = self.file(fn)
1843 1842 base = fl.ancestor(my, other)
1844 1843 a = self.wjoin(fn)
1845 1844 b = temp("base", base)
1846 1845 c = temp("other", other)
1847 1846
1848 1847 self.ui.note("resolving %s\n" % fn)
1849 1848 self.ui.debug("file %s: other %s ancestor %s\n" %
1850 1849 (fn, short(other), short(base)))
1851 1850
1852 1851 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1853 1852 or "hgmerge")
1854 1853 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1855 1854 if r:
1856 1855 self.ui.warn("merging %s failed!\n" % fn)
1857 1856
1858 1857 os.unlink(b)
1859 1858 os.unlink(c)
1860 1859
1861 1860 def verify(self):
1862 1861 filelinkrevs = {}
1863 1862 filenodes = {}
1864 1863 changesets = revisions = files = 0
1865 1864 errors = 0
1866 1865
1867 1866 seen = {}
1868 1867 self.ui.status("checking changesets\n")
1869 1868 for i in range(self.changelog.count()):
1870 1869 changesets += 1
1871 1870 n = self.changelog.node(i)
1872 1871 if n in seen:
1873 1872 self.ui.warn("duplicate changeset at revision %d\n" % i)
1874 1873 errors += 1
1875 1874 seen[n] = 1
1876 1875
1877 1876 for p in self.changelog.parents(n):
1878 1877 if p not in self.changelog.nodemap:
1879 1878 self.ui.warn("changeset %s has unknown parent %s\n" %
1880 1879 (short(n), short(p)))
1881 1880 errors += 1
1882 1881 try:
1883 1882 changes = self.changelog.read(n)
1884 1883 except Exception, inst:
1885 1884 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1886 1885 errors += 1
1887 1886
1888 1887 for f in changes[3]:
1889 1888 filelinkrevs.setdefault(f, []).append(i)
1890 1889
1891 1890 seen = {}
1892 1891 self.ui.status("checking manifests\n")
1893 1892 for i in range(self.manifest.count()):
1894 1893 n = self.manifest.node(i)
1895 1894 if n in seen:
1896 1895 self.ui.warn("duplicate manifest at revision %d\n" % i)
1897 1896 errors += 1
1898 1897 seen[n] = 1
1899 1898
1900 1899 for p in self.manifest.parents(n):
1901 1900 if p not in self.manifest.nodemap:
1902 1901 self.ui.warn("manifest %s has unknown parent %s\n" %
1903 1902 (short(n), short(p)))
1904 1903 errors += 1
1905 1904
1906 1905 try:
1907 1906 delta = mdiff.patchtext(self.manifest.delta(n))
1908 1907 except KeyboardInterrupt:
1909 1908 self.ui.warn("aborted")
1910 1909 sys.exit(0)
1911 1910 except Exception, inst:
1912 1911 self.ui.warn("unpacking manifest %s: %s\n"
1913 1912 % (short(n), inst))
1914 1913 errors += 1
1915 1914
1916 1915 ff = [ l.split('\0') for l in delta.splitlines() ]
1917 1916 for f, fn in ff:
1918 1917 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1919 1918
1920 1919 self.ui.status("crosschecking files in changesets and manifests\n")
1921 1920 for f in filenodes:
1922 1921 if f not in filelinkrevs:
1923 1922 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1924 1923 errors += 1
1925 1924
1926 1925 for f in filelinkrevs:
1927 1926 if f not in filenodes:
1928 1927 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1929 1928 errors += 1
1930 1929
1931 1930 self.ui.status("checking files\n")
1932 1931 ff = filenodes.keys()
1933 1932 ff.sort()
1934 1933 for f in ff:
1935 1934 if f == "/dev/null": continue
1936 1935 files += 1
1937 1936 fl = self.file(f)
1938 1937 nodes = { nullid: 1 }
1939 1938 seen = {}
1940 1939 for i in range(fl.count()):
1941 1940 revisions += 1
1942 1941 n = fl.node(i)
1943 1942
1944 1943 if n in seen:
1945 1944 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1946 1945 errors += 1
1947 1946
1948 1947 if n not in filenodes[f]:
1949 1948 self.ui.warn("%s: %d:%s not in manifests\n"
1950 1949 % (f, i, short(n)))
1951 1950 errors += 1
1952 1951 else:
1953 1952 del filenodes[f][n]
1954 1953
1955 1954 flr = fl.linkrev(n)
1956 1955 if flr not in filelinkrevs[f]:
1957 1956 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1958 1957 % (f, short(n), fl.linkrev(n)))
1959 1958 errors += 1
1960 1959 else:
1961 1960 filelinkrevs[f].remove(flr)
1962 1961
1963 1962 # verify contents
1964 1963 try:
1965 1964 t = fl.read(n)
1966 1965 except Exception, inst:
1967 1966 self.ui.warn("unpacking file %s %s: %s\n"
1968 1967 % (f, short(n), inst))
1969 1968 errors += 1
1970 1969
1971 1970 # verify parents
1972 1971 (p1, p2) = fl.parents(n)
1973 1972 if p1 not in nodes:
1974 1973 self.ui.warn("file %s:%s unknown parent 1 %s" %
1975 1974 (f, short(n), short(p1)))
1976 1975 errors += 1
1977 1976 if p2 not in nodes:
1978 1977 self.ui.warn("file %s:%s unknown parent 2 %s" %
1979 1978 (f, short(n), short(p1)))
1980 1979 errors += 1
1981 1980 nodes[n] = 1
1982 1981
1983 1982 # cross-check
1984 1983 for node in filenodes[f]:
1985 1984 self.ui.warn("node %s in manifests not in %s\n"
1986 1985 % (hex(node), f))
1987 1986 errors += 1
1988 1987
1989 1988 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1990 1989 (files, changesets, revisions))
1991 1990
1992 1991 if errors:
1993 1992 self.ui.warn("%d integrity errors encountered!\n" % errors)
1994 1993 return 1
1995 1994
class remoterepository:
    """Base class for repositories accessed over the wire."""
    def local(self):
        # a remote repository has no direct filesystem access
        return False
1999 1998
class httprepository(remoterepository):
    """Repository accessed over HTTP, optionally through a proxy.

    Proxy settings come from the [http_proxy] config section or the
    http_proxy/no_proxy environment variables; the resulting urllib2
    opener is installed globally.
    """
    def __init__(self, ui, path):
        # fix missing / after hostname
        s = urlparse.urlsplit(path)
        partial = s[2]
        if not partial: partial = "/"
        self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
        self.ui = ui
        no_list = [ "localhost", "127.0.0.1" ]
        host = ui.config("http_proxy", "host")
        if host is None:
            host = os.environ.get("http_proxy")
        if host and host.startswith('http://'):
            host = host[7:]
        user = ui.config("http_proxy", "user")
        passwd = ui.config("http_proxy", "passwd")
        no = ui.config("http_proxy", "no")
        if no is None:
            no = os.environ.get("no_proxy")
        if no:
            no_list = no_list + no.split(",")

        no_proxy = 0
        for h in no_list:
            if (path.startswith("http://" + h + "/") or
                path.startswith("http://" + h + ":") or
                path == "http://" + h):
                no_proxy = 1

        # Note: urllib2 takes proxy values from the environment and those will
        # take precedence
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if os.environ.has_key(env):
                    del os.environ[env]
            except OSError:
                pass

        proxy_handler = urllib2.BaseHandler()
        if host and not no_proxy:
            proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})

        authinfo = None
        if user and passwd:
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            authinfo = urllib2.ProxyBasicAuthHandler(passmgr)

        opener = urllib2.build_opener(proxy_handler, authinfo)
        urllib2.install_opener(opener)
2050 2049
2051 2050 def dev(self):
2052 2051 return -1
2053 2052
2054 2053 def do_cmd(self, cmd, **args):
2055 2054 self.ui.debug("sending %s command\n" % cmd)
2056 2055 q = {"cmd": cmd}
2057 2056 q.update(args)
2058 2057 qs = urllib.urlencode(q)
2059 2058 cu = "%s?%s" % (self.url, qs)
2060 2059 resp = urllib2.urlopen(cu)
2061 2060 proto = resp.headers['content-type']
2062 2061
2063 2062 # accept old "text/plain" and "application/hg-changegroup" for now
2064 2063 if not proto.startswith('application/mercurial') and \
2065 2064 not proto.startswith('text/plain') and \
2066 2065 not proto.startswith('application/hg-changegroup'):
2067 2066 raise RepoError("'%s' does not appear to be an hg repository"
2068 2067 % self.url)
2069 2068
2070 2069 if proto.startswith('application/mercurial'):
2071 2070 version = proto[22:]
2072 2071 if float(version) > 0.1:
2073 2072 raise RepoError("'%s' uses newer protocol %s" %
2074 2073 (self.url, version))
2075 2074
2076 2075 return resp
2077 2076
2078 2077 def heads(self):
2079 2078 d = self.do_cmd("heads").read()
2080 2079 try:
2081 2080 return map(bin, d[:-1].split(" "))
2082 2081 except:
2083 2082 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2084 2083 raise
2085 2084
2086 2085 def branches(self, nodes):
2087 2086 n = " ".join(map(hex, nodes))
2088 2087 d = self.do_cmd("branches", nodes=n).read()
2089 2088 try:
2090 2089 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2091 2090 return br
2092 2091 except:
2093 2092 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2094 2093 raise
2095 2094
2096 2095 def between(self, pairs):
2097 2096 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2098 2097 d = self.do_cmd("between", pairs=n).read()
2099 2098 try:
2100 2099 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2101 2100 return p
2102 2101 except:
2103 2102 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2104 2103 raise
2105 2104
2106 2105 def changegroup(self, nodes):
2107 2106 n = " ".join(map(hex, nodes))
2108 2107 f = self.do_cmd("changegroup", roots=n)
2109 2108 bytes = 0
2110 2109
2111 2110 class zread:
2112 2111 def __init__(self, f):
2113 2112 self.zd = zlib.decompressobj()
2114 2113 self.f = f
2115 2114 self.buf = ""
2116 2115 def read(self, l):
2117 2116 while l > len(self.buf):
2118 2117 r = self.f.read(4096)
2119 2118 if r:
2120 2119 self.buf += self.zd.decompress(r)
2121 2120 else:
2122 2121 self.buf += self.zd.flush()
2123 2122 break
2124 2123 d, self.buf = self.buf[:l], self.buf[l:]
2125 2124 return d
2126 2125
2127 2126 return zread(f)
2128 2127
class remotelock:
    """Token representing a lock held on a remote repository.

    The lock is released explicitly via release(), or implicitly when
    the token is garbage collected.
    """

    def __init__(self, repo):
        # keep a handle so we can unlock later; cleared on release
        self.repo = repo

    def release(self):
        """Ask the remote side to unlock, then drop our reference."""
        self.repo.unlock()
        self.repo = None

    def __del__(self):
        # safety net: release exactly once even if the caller forgot
        if self.repo:
            self.release()
2138 2137
class sshrepository(remoterepository):
    """Repository driven over an ssh pipe to 'hg serve --stdio'."""

    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        # ssh://[user@]host[:port]/path
        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))', path)
        if not m:
            raise RepoError("couldn't parse destination %s" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7)

        # assemble the ssh argument string: [user@]host [-p port]
        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s" % (args, self.port)) or args
        path = self.path or ""

        if not path:
            raise RepoError("no remote repository path specified")

        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")
        cmd = "%s %s '%s -R %s serve --stdio'"
        cmd = cmd % (sshcmd, args, remotecmd, path)

        # child's stdin, stdout and stderr respectively
        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        """Drain any pending remote stderr output without blocking."""
        while 1:
            r, w, x = select.select([self.pipee], [], [], 0)
            if not r: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status("remote: ", l)

    def __del__(self):
        try:
            self.pipeo.close()
            self.pipei.close()
            # show any leftover stderr from the remote side
            for l in self.pipee:
                self.ui.status("remote: ", l)
            self.pipee.close()
        except:
            # best effort only; never raise during teardown
            pass

    def dev(self):
        # remote repositories have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Send cmd with length-prefixed args; return the reply pipe."""
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """Run cmd and return its complete, length-prefixed response."""
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        """Lock the remote repository; returns a remotelock token."""
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        """Return the remote repository's head nodes (binary form)."""
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        """Return branch tuples (node quadruples) for the given nodes."""
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        """For each (top, bottom) pair, return the intervening nodes."""
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        """Request a changegroup; caller reads it from the returned pipe."""
        n = " ".join(map(hex, nodes))
        # do_cmd already returns the reply pipe (self.pipei)
        return self.do_cmd("changegroup", roots=n)

    def addchangegroup(self, cg):
        """Push the changegroup stream cg to the remote repository."""
        d = self.call("addchangegroup")
        if d:
            # bug fix: the message was passed as a second exception
            # argument ("...%s", d) instead of being interpolated
            raise RepoError("push refused: %s" % d)

        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        # final length-prefixed status payload; non-empty means success
        # per this return expression — TODO confirm against the server side
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
2261 2260
2262 2261 class httpsrepository(httprepository):
          # Identical to httprepository: the urllib2 opener installed by the
          # base class handles the https scheme with no extra setup here.
2263 2262 pass
2264 2263
def repository(ui, path=None, create=0):
    """Instantiate the repository class appropriate for path.

    The URL scheme selects the implementation; an empty path or an
    unrecognized scheme falls through to localrepository, which is the
    only backend honoring the create flag.
    """
    if path:
        schemes = (
            ("http://", lambda p: httprepository(ui, p)),
            ("https://", lambda p: httpsrepository(ui, p)),
            ("hg://",
             lambda p: httprepository(ui, p.replace("hg://", "http://"))),
            ("old-http://",
             lambda p: localrepository(ui, p.replace("old-http://", "http://"))),
            ("ssh://", lambda p: sshrepository(ui, p)),
        )
        for prefix, make in schemes:
            if path.startswith(prefix):
                return make(path)

    return localrepository(ui, path, create)
@@ -1,73 +1,79 b''
1 1 #!/bin/sh
2 2
3 3 # This test makes sure that we don't mark a file as merged with its ancestor
4 4 # when we do a merge.
5 5
6 6 cat <<'EOF' > merge
7 7 #!/bin/sh
8 8 echo merging for `basename $1`
9 9 EOF
10 10 chmod +x merge
11 11
12 12 echo creating base
13 13 hg init a
14 14 cd a
15 15 echo 1 > foo
16 16 echo 1 > bar
17 17 echo 1 > baz
18 18 echo 1 > quux
19 19 hg add foo bar baz quux
20 20 hg commit -m "base" -d "0 0"
21 21
22 22 cd ..
23 23 hg clone a b
24 24
25 25 echo creating branch a
26 26 cd a
27 27 echo 2a > foo
28 28 echo 2a > bar
29 29 hg commit -m "branch a" -d "0 0"
30 30
31 31 echo creating branch b
32 32
33 33 cd ..
34 34 cd b
35 35 echo 2b > foo
36 36 echo 2b > baz
37 37 hg commit -m "branch b" -d "0 0"
38 38
39 39 echo "we shouldn't have anything but n state here"
40 40 hg debugstate | cut -b 1-16,35-
41 41
42 42 echo merging
43 43 hg pull ../a
44 env HGMERGE=../merge hg update -vm --debug
44 env HGMERGE=../merge hg update -vm
45 45
46 46 echo 2m > foo
47 47 echo 2b > baz
48 48 echo new > quux
49 49
50 50 echo "we shouldn't have anything but foo in merge state here"
51 51 hg debugstate | cut -b 1-16,35- | grep "^m"
52 52
53 53 hg ci -m "merge" -d "0 0"
54 54
55 55 echo "main: we should have a merge here"
56 56 hg debugindex .hg/00changelog.i
57 57
58 echo "log should show foo and quux changed"
59 hg log -v -r tip
60
58 61 echo "foo: we should have a merge here"
59 62 hg debugindex .hg/data/foo.i
60 63
61 64 echo "bar: we shouldn't have a merge here"
62 65 hg debugindex .hg/data/bar.i
63 66
64 67 echo "baz: we shouldn't have a merge here"
65 68 hg debugindex .hg/data/baz.i
66 69
67 70 echo "quux: we shouldn't have a merge here"
68 71 hg debugindex .hg/data/quux.i
69 72
73 echo "manifest entries should match tips of all files"
74 hg manifest
75
70 76 echo "everything should be clean now"
71 77 hg status
72 78
73 79 hg verify
@@ -1,58 +1,70 b''
1 1 creating base
2 2 creating branch a
3 3 creating branch b
4 4 we shouldn't have anything but n state here
5 5 n 644 2 bar
6 6 n 644 3 baz
7 7 n 644 3 foo
8 8 n 644 2 quux
9 9 merging
10 10 pulling from ../a
11 11 searching for changes
12 12 adding changesets
13 13 adding manifests
14 14 adding file changes
15 15 added 1 changesets with 2 changes to 2 files
16 16 (run 'hg update' to get a working copy)
17 17 merging for foo
18 18 resolving manifests
19 force None allow 1 moddirstate True linear False
20 ancestor a0486579db29 local ef1b4dbe2193 remote 336d8406d617
21 remote bar is newer, get
22 foo versions differ, resolve
23 19 getting bar
24 20 merging foo
25 21 resolving foo
26 file foo: other 33d1fb69067a ancestor b8e02f643373
27 22 we shouldn't have anything but foo in merge state here
28 23 m 644 3 foo
29 24 main: we should have a merge here
30 25 rev offset length base linkrev nodeid p1 p2
31 26 0 0 73 0 0 cdca01651b96 000000000000 000000000000
32 27 1 73 68 1 1 f6718a9cb7f3 cdca01651b96 000000000000
33 28 2 141 68 2 2 bdd988058d16 cdca01651b96 000000000000
34 3 209 66 3 3 9da9fbd62226 f6718a9cb7f3 bdd988058d16
29 3 209 66 3 3 d8a521142a3c f6718a9cb7f3 bdd988058d16
30 log should show foo and quux changed
31 changeset: 3:d8a521142a3c02186ee6c7254738a7e6427ed4c8
32 tag: tip
33 parent: 1:f6718a9cb7f31f1a92d27bd6544c71617d6d4e4f
34 parent: 2:bdd988058d16e2d7392958eace7b64817e44a54e
35 user: test
36 date: Thu Jan 1 00:00:00 1970 +0000
37 files: foo quux
38 description:
39 merge
40
41
35 42 foo: we should have a merge here
36 43 rev offset length base linkrev nodeid p1 p2
37 44 0 0 3 0 0 b8e02f643373 000000000000 000000000000
38 45 1 3 4 1 1 2ffeddde1b65 b8e02f643373 000000000000
39 46 2 7 4 2 2 33d1fb69067a b8e02f643373 000000000000
40 47 3 11 4 3 3 aa27919ee430 2ffeddde1b65 33d1fb69067a
41 48 bar: we shouldn't have a merge here
42 49 rev offset length base linkrev nodeid p1 p2
43 50 0 0 3 0 0 b8e02f643373 000000000000 000000000000
44 51 1 3 4 1 2 33d1fb69067a b8e02f643373 000000000000
45 52 baz: we shouldn't have a merge here
46 53 rev offset length base linkrev nodeid p1 p2
47 54 0 0 3 0 0 b8e02f643373 000000000000 000000000000
48 55 1 3 4 1 1 2ffeddde1b65 b8e02f643373 000000000000
49 56 quux: we shouldn't have a merge here
50 57 rev offset length base linkrev nodeid p1 p2
51 58 0 0 3 0 0 b8e02f643373 000000000000 000000000000
52 59 1 3 5 1 3 6128c0f33108 b8e02f643373 000000000000
60 manifest entries should match tips of all files
61 33d1fb69067a0139622a3fa3b7ba1cdb1367972e 644 bar
62 2ffeddde1b65b4827f6746174a145474129fa2ce 644 baz
63 aa27919ee4303cfd575e1fb932dd64d75aa08be4 644 foo
64 6128c0f33108e8cfbb4e0824d13ae48b466d7280 644 quux
53 65 everything should be clean now
54 66 checking changesets
55 67 checking manifests
56 68 crosschecking files in changesets and manifests
57 69 checking files
58 70 4 files, 4 changesets, 10 total revisions
@@ -1,18 +1,18 b''
1 1 pulling from ../B1
2 2 searching for changes
3 3 adding changesets
4 4 adding manifests
5 5 adding file changes
6 6 added 1 changesets with 1 changes to 1 files
7 7 (run 'hg update' to get a working copy)
8 8 bar should remain deleted.
9 f405ac83a5611071d6b54dd5eb26943b1fdc4460 644 foo
9 f9b0e817f6a48de3564c6b2957687c5e7297c5a0 644 foo
10 10 pulling from ../A2
11 11 searching for changes
12 12 adding changesets
13 13 adding manifests
14 14 adding file changes
15 15 added 1 changesets with 0 changes to 0 files
16 16 (run 'hg update' to get a working copy)
17 17 bar should remain deleted.
18 18 f9b0e817f6a48de3564c6b2957687c5e7297c5a0 644 foo
@@ -1,53 +1,53 b''
1 1 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
2 2 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
3 3 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
4 4 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
5 5 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
6 6 changeset: 2:9f827976dae4
7 7 tag: tip
8 8 user: test
9 9 date: Thu Jan 1 00:00:00 1970 +0000
10 10 summary: 2
11 11
12 12 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
13 13 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
14 changeset: 3:c8225a106186
14 changeset: 3:142428fbbcc5
15 15 tag: tip
16 16 user: test
17 17 date: Thu Jan 1 00:00:00 1970 +0000
18 18 summary: 3
19 19
20 20 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
21 21 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
22 changeset: 4:8dfeee82a94b
22 changeset: 4:4d450f9aa680
23 23 tag: tip
24 24 user: test
25 25 date: Thu Jan 1 00:00:00 1970 +0000
26 26 summary: 4
27 27
28 28 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
29 29 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
30 30 3570202ceac2b52517df64ebd0a062cb0d8fe33a 644 c
31 changeset: 4:8dfeee82a94b
31 changeset: 4:4d450f9aa680
32 32 user: test
33 33 date: Thu Jan 1 00:00:00 1970 +0000
34 34 summary: 4
35 35
36 36 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
37 37 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
38 changeset: 6:c0e932ecae5e
38 changeset: 6:b4b8b9afa8cc
39 39 tag: tip
40 parent: 4:8dfeee82a94b
40 parent: 4:4d450f9aa680
41 41 parent: 5:a7925a42d0df
42 42 user: test
43 43 date: Thu Jan 1 00:00:00 1970 +0000
44 44 summary: 6
45 45
46 46 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
47 47 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
48 changeset: 7:3a157da4365d
48 changeset: 7:f84d0b1b024e
49 49 tag: tip
50 50 user: test
51 51 date: Thu Jan 1 00:00:00 1970 +0000
52 52 summary: 7
53 53
@@ -1,13 +1,14 b''
1 1 unknown
2 2 acb14030fe0a tip
3 3 acb14030fe0a21b60322c440ad2d20cf7685a376 first
4 4 tip 1:b9154636be938d3d431e75a7c906504a079bfe07
5 5 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
6 6 b9154636be93 tip
7 7 M a
8 8 b9154636be93+ tip
9 9 acb14030fe0a+ first
10 10 acb14030fe0a21b60322c440ad2d20cf7685a376+ first
11 11 M a
12 12 c8edf04160c7 tip
13 c8edf04160c7+b9154636be93 tip
13 c8edf04160c7+b9154636be93+ tip
14 M .hgtags
General Comments 0
You need to be logged in to leave comments. Login now