##// END OF EJS Templates
Deal with repos with missing timezones
mpm@selenic.com -
r1013:2e8b8da9 default
parent child Browse files
Show More
@@ -1,2277 +1,2279 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 14 demandload(globals(), "bisect errno select stat")
15 15
class filelog(revlog):
    """Revlog storing the history of a single tracked file under .hg/data.

    Revisions may carry a metadata header delimited by '\\1\\n' markers;
    read()/readmeta() strip or parse it, add() writes it.
    """
    def __init__(self, opener, path):
        revlog.__init__(self, opener,
                        os.path.join("data", self.encodedir(path + ".i")),
                        os.path.join("data", self.encodedir(path + ".d")))

    # This avoids a collision between a file named foo and a dir named
    # foo.i or foo.d
    def encodedir(self, path):
        return (path
                .replace(".hg/", ".hg.hg/")
                .replace(".i/", ".i.hg/")
                .replace(".d/", ".d.hg/"))

    def decodedir(self, path):
        return (path
                .replace(".d.hg/", ".d/")
                .replace(".i.hg/", ".i/")
                .replace(".hg.hg/", ".hg/"))

    def read(self, node):
        """Return the file text for node with any metadata header stripped."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.find('\1\n', 2)
        return t[s+2:]

    def readmeta(self, node):
        """Return the metadata dict embedded in this revision ({} if none)."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            # FIX: previously returned the raw text here, which is not a
            # metadata mapping; no header means no metadata.
            return {}
        s = t.find('\1\n', 2)
        mt = t[2:s]
        # FIX: m was never initialized, so any revision that actually had
        # metadata raised NameError below.
        m = {}
        for l in mt.splitlines():
            k, v = l.split(": ", 1)
            m[k] = v
        return m

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision; metadata (or text starting with the marker)
        is wrapped in a '\\1\\n' header."""
        if meta or text.startswith('\1\n'):
            mt = ""
            if meta:
                mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
            text = "\1\n" + "".join(mt) + "\1\n" + text
        return self.addrevision(text, transaction, link, p1, p2)

    def annotate(self, node):
        """Return [(linkrev, line), ...] attributing each line of node."""

        def decorate(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def pair(parent, child):
            # lines unchanged since the parent inherit its attribution
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        # find all ancestors
        needed = {node:1}
        visit = [node]
        while visit:
            n = visit.pop(0)
            for p in self.parents(n):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision which is a topological order
        visit = [ (self.rev(n), n) for n in needed.keys() ]
        visit.sort()
        hist = {}

        for r,n in visit:
            curr = decorate(self.read(n), self.linkrev(n))
            for p in self.parents(n):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[n] = curr

        return zip(hist[n][0], hist[n][1].splitlines(1))
102 102
class manifest(revlog):
    """Revlog of manifests (00manifest.{i,d}).

    A manifest maps file name -> node; each line is
    "<name>\\0<40-hex-node>[x]\\n" where the optional trailing "x" marks
    the file executable.  Caches the last read (mapcache) and last
    written (listcache) revisions to speed up delta generation.
    """
    def __init__(self, opener):
        self.mapcache = None    # (node, {name: node}, {name: is_exec})
        self.listcache = None   # (text, text.splitlines(1))
        self.addlist = None     # lines being built by add(), used by diff()
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        """Return the {name: node} mapping for a manifest revision."""
        if node == nullid: return {} # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        flag = {}
        self.listcache = (text, text.splitlines(1))
        for l in self.listcache[1]:
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
            # anything after the 40 hex digits (minus the newline) is "x"
            flag[f] = (n[40:-1] == "x")
        self.mapcache = (node, map, flag)
        return map

    def readflags(self, node):
        """Return the {name: is_exec} mapping for a manifest revision."""
        if node == nullid: return {} # don't upset local cache
        if not self.mapcache or self.mapcache[0] != node:
            self.read(node)     # populates mapcache as a side effect
        return self.mapcache[2]

    def diff(self, a, b):
        # this is sneaky, as we're not actually using a and b
        # (the cached line lists stand in for them when valid)
        if self.listcache and self.addlist and self.listcache[0] == a:
            d = mdiff.diff(self.listcache[1], self.addlist, 1)
            # sanity check: the line-based delta must reproduce b exactly
            if mdiff.patch(a, d) != b:
                sys.stderr.write("*** sortdiff failed, falling back ***\n")
                return mdiff.textdiff(a, b)
            return d
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, flags, transaction, link, p1=None, p2=None,
            changed=None):
        """Add a manifest revision for map/flags; changed is an optional
        (added_or_modified, removed) pair of file lists that enables
        incremental delta generation against the cached parent."""
        # directly generate the mdiff delta from the data collected during
        # the bisect loop below
        def gendelta(delta):
            # delta entries are [startline, endline, startbyte, endbyte, text];
            # merge overlapping/adjacent ranges and emit binary patch hunks
            i = 0
            result = []
            while i < len(delta):
                start = delta[i][2]
                end = delta[i][3]
                l = delta[i][4]
                if l == None:
                    l = ""
                while i < len(delta) - 1 and start <= delta[i+1][2] \
                          and end >= delta[i+1][2]:
                    if delta[i+1][3] > end:
                        end = delta[i+1][3]
                    if delta[i+1][4]:
                        l += delta[i+1][4]
                    i += 1
                result.append(struct.pack(">lll", start, end, len(l)) + l)
                i += 1
            return result

        # apply the changes collected during the bisect loop to our addlist
        def addlistdelta(addlist, delta):
            # apply the deltas to the addlist.  start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(delta)
            while i > 0:
                i -= 1
                start = delta[i][0]
                end = delta[i][1]
                if delta[i][4]:
                    addlist[start:end] = [delta[i][4]]
                else:
                    del addlist[start:end]
            return addlist

        # calculate the byte offset of the start of each line in the
        # manifest
        def calcoffsets(addlist):
            offsets = [0] * (len(addlist) + 1)
            offset = 0
            i = 0
            while i < len(addlist):
                offsets[i] = offset
                offset += len(addlist[i])
                i += 1
            offsets[i] = offset
            return offsets

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or \
               self.mapcache[0] != p1:
            # slow path: rebuild the full sorted line list from scratch
            files = map.keys()
            files.sort()

            self.addlist = ["%s\000%s%s\n" %
                            (f, hex(map[f]), flags[f] and "x" or '')
                            for f in files]
            cachedelta = None
        else:
            # fast path: patch the cached parent line list in place
            addlist = self.listcache[1]

            # find the starting offset for each line in the add list
            offsets = calcoffsets(addlist)

            # combine the changed lists into one list for sorting
            # (tag 0 = added/modified, tag 1 = removed)
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            bs = 0

            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                bs = bisect.bisect(addlist, f, bs)
                if bs < len(addlist):
                    fn = addlist[bs][:addlist[bs].index('\0')]
                else:
                    fn = None
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]),
                                          flags[f] and "x" or '')
                else:
                    l = None
                start = bs
                if fn != f:
                    # item not found, insert a new one
                    end = bs
                    if w[1] == 1:
                        sys.stderr.write("failed to remove %s from manifest\n"
                                         % f)
                        sys.exit(1)
                else:
                    # item is found, replace/delete the existing line
                    end = bs + 1
                delta.append([start, end, offsets[start], offsets[end], l])

            self.addlist = addlistdelta(addlist, delta)
            if self.mapcache[0] == self.tip():
                # the revlog can reuse this delta directly against tip
                cachedelta = "".join(gendelta(delta))
            else:
                cachedelta = None

        text = "".join(self.addlist)
        # verify the incremental delta actually reproduces the new text
        if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
            sys.stderr.write("manifest delta failure\n")
            sys.exit(1)
        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self.mapcache = (n, map, flags)
        self.listcache = (text, self.addlist)
        self.addlist = None

        return n
261 261
class changelog(revlog):
    """Revlog of changeset entries (00changelog.{i,d}).

    An entry is: manifest hex node, user, "time offset" date, one file
    per line, a blank line, then the description.
    """

    def __init__(self, opener):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")

    def extract(self, text):
        """Parse an entry into (manifest, user, date, files, desc)."""
        if not text:
            return (nullid, "", "0", [], "")
        header, desc = text.split("\n\n", 1)
        lines = header.splitlines()
        manifest = bin(lines[0])
        user = lines[1]
        date = lines[2]
        # some tools used -d without a timezone; assume UTC (offset 0)
        if " " not in date:
            date += " 0"
        files = lines[3:]
        return (manifest, user, date, files, desc)

    def read(self, node):
        """Return the parsed entry for node (see extract)."""
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None,
            user=None, date=None):
        """Append a changeset; date defaults to now with the local offset."""
        if not date:
            if time.daylight:
                offset = time.altzone
            else:
                offset = time.timezone
            date = "%d %d" % (time.time(), offset)
        list.sort()
        entry = "\n".join([hex(manifest), user, date] + list + ["", desc])
        return self.addrevision(entry, transaction, self.count(), p1, p2)
291 293
292 294 class dirstate:
293 295 def __init__(self, opener, ui, root):
294 296 self.opener = opener
295 297 self.root = root
296 298 self.dirty = 0
297 299 self.ui = ui
298 300 self.map = None
299 301 self.pl = None
300 302 self.copies = {}
301 303 self.ignorefunc = None
302 304
303 305 def wjoin(self, f):
304 306 return os.path.join(self.root, f)
305 307
306 308 def getcwd(self):
307 309 cwd = os.getcwd()
308 310 if cwd == self.root: return ''
309 311 return cwd[len(self.root) + 1:]
310 312
311 313 def ignore(self, f):
312 314 if not self.ignorefunc:
313 315 bigpat = []
314 316 try:
315 317 l = file(self.wjoin(".hgignore"))
316 318 for pat in l:
317 319 p = pat.rstrip()
318 320 if p:
319 321 try:
320 322 re.compile(p)
321 323 except:
322 324 self.ui.warn("ignoring invalid ignore"
323 325 + " regular expression '%s'\n" % p)
324 326 else:
325 327 bigpat.append(p)
326 328 except IOError: pass
327 329
328 330 if bigpat:
329 331 s = "(?:%s)" % (")|(?:".join(bigpat))
330 332 r = re.compile(s)
331 333 self.ignorefunc = r.search
332 334 else:
333 335 self.ignorefunc = util.never
334 336
335 337 return self.ignorefunc(f)
336 338
337 339 def __del__(self):
338 340 if self.dirty:
339 341 self.write()
340 342
341 343 def __getitem__(self, key):
342 344 try:
343 345 return self.map[key]
344 346 except TypeError:
345 347 self.read()
346 348 return self[key]
347 349
348 350 def __contains__(self, key):
349 351 if not self.map: self.read()
350 352 return key in self.map
351 353
352 354 def parents(self):
353 355 if not self.pl:
354 356 self.read()
355 357 return self.pl
356 358
357 359 def markdirty(self):
358 360 if not self.dirty:
359 361 self.dirty = 1
360 362
361 363 def setparents(self, p1, p2 = nullid):
362 364 self.markdirty()
363 365 self.pl = p1, p2
364 366
365 367 def state(self, key):
366 368 try:
367 369 return self[key][0]
368 370 except KeyError:
369 371 return "?"
370 372
371 373 def read(self):
372 374 if self.map is not None: return self.map
373 375
374 376 self.map = {}
375 377 self.pl = [nullid, nullid]
376 378 try:
377 379 st = self.opener("dirstate").read()
378 380 if not st: return
379 381 except: return
380 382
381 383 self.pl = [st[:20], st[20: 40]]
382 384
383 385 pos = 40
384 386 while pos < len(st):
385 387 e = struct.unpack(">cllll", st[pos:pos+17])
386 388 l = e[4]
387 389 pos += 17
388 390 f = st[pos:pos + l]
389 391 if '\0' in f:
390 392 f, c = f.split('\0')
391 393 self.copies[f] = c
392 394 self.map[f] = e[:4]
393 395 pos += l
394 396
395 397 def copy(self, source, dest):
396 398 self.read()
397 399 self.markdirty()
398 400 self.copies[dest] = source
399 401
400 402 def copied(self, file):
401 403 return self.copies.get(file, None)
402 404
403 405 def update(self, files, state, **kw):
404 406 ''' current states:
405 407 n normal
406 408 m needs merging
407 409 r marked for removal
408 410 a marked for addition'''
409 411
410 412 if not files: return
411 413 self.read()
412 414 self.markdirty()
413 415 for f in files:
414 416 if state == "r":
415 417 self.map[f] = ('r', 0, 0, 0)
416 418 else:
417 419 s = os.stat(os.path.join(self.root, f))
418 420 st_size = kw.get('st_size', s.st_size)
419 421 st_mtime = kw.get('st_mtime', s.st_mtime)
420 422 self.map[f] = (state, s.st_mode, st_size, st_mtime)
421 423
422 424 def forget(self, files):
423 425 if not files: return
424 426 self.read()
425 427 self.markdirty()
426 428 for f in files:
427 429 try:
428 430 del self.map[f]
429 431 except KeyError:
430 432 self.ui.warn("not in dirstate: %s!\n" % f)
431 433 pass
432 434
433 435 def clear(self):
434 436 self.map = {}
435 437 self.markdirty()
436 438
437 439 def write(self):
438 440 st = self.opener("dirstate", "w")
439 441 st.write("".join(self.pl))
440 442 for f, e in self.map.items():
441 443 c = self.copied(f)
442 444 if c:
443 445 f = f + "\0" + c
444 446 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
445 447 st.write(e + f)
446 448 self.dirty = 0
447 449
448 450 def filterfiles(self, files):
449 451 ret = {}
450 452 unknown = []
451 453
452 454 for x in files:
453 455 if x is '.':
454 456 return self.map.copy()
455 457 if x not in self.map:
456 458 unknown.append(x)
457 459 else:
458 460 ret[x] = self.map[x]
459 461
460 462 if not unknown:
461 463 return ret
462 464
463 465 b = self.map.keys()
464 466 b.sort()
465 467 blen = len(b)
466 468
467 469 for x in unknown:
468 470 bs = bisect.bisect(b, x)
469 471 if bs != 0 and b[bs-1] == x:
470 472 ret[x] = self.map[x]
471 473 continue
472 474 while bs < blen:
473 475 s = b[bs]
474 476 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
475 477 ret[s] = self.map[s]
476 478 else:
477 479 break
478 480 bs += 1
479 481 return ret
480 482
481 483 def walk(self, files = None, match = util.always, dc=None):
482 484 self.read()
483 485
484 486 # walk all files by default
485 487 if not files:
486 488 files = [self.root]
487 489 if not dc:
488 490 dc = self.map.copy()
489 491 elif not dc:
490 492 dc = self.filterfiles(files)
491 493
492 494 known = {'.hg': 1}
493 495 def seen(fn):
494 496 if fn in known: return True
495 497 known[fn] = 1
496 498 def traverse():
497 499 for ff in util.unique(files):
498 500 f = os.path.join(self.root, ff)
499 501 try:
500 502 st = os.stat(f)
501 503 except OSError, inst:
502 504 if ff not in dc: self.ui.warn('%s: %s\n' % (
503 505 util.pathto(self.getcwd(), ff),
504 506 inst.strerror))
505 507 continue
506 508 if stat.S_ISDIR(st.st_mode):
507 509 for dir, subdirs, fl in os.walk(f):
508 510 d = dir[len(self.root) + 1:]
509 511 nd = util.normpath(d)
510 512 if nd == '.': nd = ''
511 513 if seen(nd):
512 514 subdirs[:] = []
513 515 continue
514 516 for sd in subdirs:
515 517 ds = os.path.join(nd, sd +'/')
516 518 if self.ignore(ds) or not match(ds):
517 519 subdirs.remove(sd)
518 520 subdirs.sort()
519 521 fl.sort()
520 522 for fn in fl:
521 523 fn = util.pconvert(os.path.join(d, fn))
522 524 yield 'f', fn
523 525 elif stat.S_ISREG(st.st_mode):
524 526 yield 'f', ff
525 527 else:
526 528 kind = 'unknown'
527 529 if stat.S_ISCHR(st.st_mode): kind = 'character device'
528 530 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
529 531 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
530 532 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
531 533 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
532 534 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
533 535 util.pathto(self.getcwd(), ff),
534 536 kind))
535 537
536 538 ks = dc.keys()
537 539 ks.sort()
538 540 for k in ks:
539 541 yield 'm', k
540 542
541 543 # yield only files that match: all in dirstate, others only if
542 544 # not in .hgignore
543 545
544 546 for src, fn in util.unique(traverse()):
545 547 fn = util.normpath(fn)
546 548 if seen(fn): continue
547 549 if fn not in dc and self.ignore(fn):
548 550 continue
549 551 if match(fn):
550 552 yield src, fn
551 553
552 554 def changes(self, files=None, match=util.always):
553 555 self.read()
554 556 if not files:
555 557 dc = self.map.copy()
556 558 else:
557 559 dc = self.filterfiles(files)
558 560 lookup, modified, added, unknown = [], [], [], []
559 561 removed, deleted = [], []
560 562
561 563 for src, fn in self.walk(files, match, dc=dc):
562 564 try:
563 565 s = os.stat(os.path.join(self.root, fn))
564 566 except OSError:
565 567 continue
566 568 if not stat.S_ISREG(s.st_mode):
567 569 continue
568 570 c = dc.get(fn)
569 571 if c:
570 572 del dc[fn]
571 573 if c[0] == 'm':
572 574 modified.append(fn)
573 575 elif c[0] == 'a':
574 576 added.append(fn)
575 577 elif c[0] == 'r':
576 578 unknown.append(fn)
577 579 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
578 580 modified.append(fn)
579 581 elif c[3] != s.st_mtime:
580 582 lookup.append(fn)
581 583 else:
582 584 unknown.append(fn)
583 585
584 586 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
585 587 if c[0] == 'r':
586 588 removed.append(fn)
587 589 else:
588 590 deleted.append(fn)
589 591 return (lookup, modified, added, removed + deleted, unknown)
590 592
# used to avoid circular references so destructors work
def opener(base):
    """Return a file-opening callable rooted at base.

    base may be a local directory or an http:// URL; http bases return
    range-request readers, local bases return ordinary file objects.
    """
    root = base
    def o(path, mode="r"):
        if root.startswith("http://"):
            # remote store: reads go over HTTP range requests
            url = os.path.join(root, urllib.quote(path))
            return httprangereader.httprangereader(url)

        name = os.path.join(root, path)

        mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                st = os.stat(name)
            except OSError:
                # target missing: make sure its directory exists
                parent = os.path.dirname(name)
                if not os.path.isdir(parent):
                    os.makedirs(parent)
            else:
                if st.st_nlink > 1:
                    # break hardlinks before writing (poor man's COW)
                    file(name + ".tmp", "wb").write(file(name, "rb").read())
                    util.rename(name + ".tmp", name)

        return file(name, mode)

    return o
618 620
619 621 class RepoError(Exception): pass
620 622
621 623 class localrepository:
    def __init__(self, ui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        path may be an http:// URL (remote) or a local directory; with
        no path, search upward from cwd for a .hg directory.
        Raises RepoError if no repository can be found.
        """
        self.remote = 0
        if path and path.startswith("http://"):
            self.remote = 1
            self.path = path
        else:
            if not path:
                # walk up from cwd until a .hg directory is found
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    oldp = p
                    p = os.path.dirname(p)
                    if p == oldp: raise RepoError("no repo found")
                path = p
            self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise RepoError("repository %s not found" % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.opener = opener(self.path)     # opens files under .hg
        self.wopener = opener(self.root)    # opens working-dir files
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        self.tagscache = None       # lazily filled by tags()
        self.nodetagscache = None   # lazily filled by nodetags()

        if not self.remote:
            self.dirstate = dirstate(self.opener, ui, self.root)
            try:
                # per-repository configuration overrides
                self.ui.readconfig(self.opener("hgrc"))
            except IOError: pass
659 661
660 662 def hook(self, name, **args):
661 663 s = self.ui.config("hooks", name)
662 664 if s:
663 665 self.ui.note("running hook %s: %s\n" % (name, s))
664 666 old = {}
665 667 for k, v in args.items():
666 668 k = k.upper()
667 669 old[k] = os.environ.get(k, None)
668 670 os.environ[k] = v
669 671
670 672 r = os.system(s)
671 673
672 674 for k, v in old.items():
673 675 if v != None:
674 676 os.environ[k] = v
675 677 else:
676 678 del os.environ[k]
677 679
678 680 if r:
679 681 self.ui.warn("abort: %s hook failed with status %d!\n" %
680 682 (name, r))
681 683 return False
682 684 return True
683 685
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # tolerate malformed (non-hex) nodes in tag files
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # uncommitted, repo-local tags override committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # the implicit 'tip' tag always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
721 723
722 724 def tagslist(self):
723 725 '''return a list of tags ordered by revision'''
724 726 l = []
725 727 for t, n in self.tags().items():
726 728 try:
727 729 r = self.changelog.rev(n)
728 730 except:
729 731 r = -2 # sort to the beginning of the list if unknown
730 732 l.append((r,t,n))
731 733 l.sort()
732 734 return [(t,n) for r,t,n in l]
733 735
734 736 def nodetags(self, node):
735 737 '''return the tags associated with a node'''
736 738 if not self.nodetagscache:
737 739 self.nodetagscache = {}
738 740 for t,n in self.tags().items():
739 741 self.nodetagscache.setdefault(n,[]).append(t)
740 742 return self.nodetagscache.get(node, [])
741 743
742 744 def lookup(self, key):
743 745 try:
744 746 return self.tags()[key]
745 747 except KeyError:
746 748 try:
747 749 return self.changelog.lookup(key)
748 750 except:
749 751 raise RepoError("unknown revision '%s'" % key)
750 752
    def dev(self):
        """Return the device number of the store, or -1 for remote repos."""
        if self.remote: return -1
        return os.stat(self.path).st_dev
754 756
    def local(self):
        """True if this repository lives on local disk."""
        return not self.remote
757 759
    def join(self, f):
        """Join f to the .hg store directory."""
        return os.path.join(self.path, f)
760 762
    def wjoin(self, f):
        """Join f to the working directory root."""
        return os.path.join(self.root, f)
763 765
    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/': f = f[1:]
        return filelog(self.opener, f)
767 769
    def getcwd(self):
        """Return cwd relative to the repo root ('' at the root)."""
        return self.dirstate.getcwd()
770 772
    def wfile(self, f, mode='r'):
        """Open file f from the working directory."""
        return self.wopener(f, mode)
773 775
    def transaction(self):
        """Start a journaled transaction.

        Also snapshots the current dirstate so undo() can restore it;
        on successful close the journal is renamed to the undo files.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # keep the completed journal around as "undo" information
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
789 791
790 792 def recover(self):
791 793 lock = self.lock()
792 794 if os.path.exists(self.join("journal")):
793 795 self.ui.status("rolling back interrupted transaction\n")
794 796 return transaction.rollback(self.opener, self.join("journal"))
795 797 else:
796 798 self.ui.warn("no interrupted transaction available\n")
797 799
    def undo(self):
        """Roll back the last transaction using the saved undo files."""
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status("rolling back last transaction\n")
            transaction.rollback(self.opener, self.join("undo"))
            # drop the stale in-memory dirstate before replacing the file,
            # then reload from the restored copy
            self.dirstate = None
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate = dirstate(self.opener, self.ui, self.root)
        else:
            self.ui.warn("no undo information available\n")
808 810
    def lock(self, wait = 1):
        """Acquire the repository lock.

        First tries without blocking; if the lock is held and wait is
        true, warns and retries with the given wait, otherwise re-raises.
        """
        try:
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
817 819
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit the given files directly, bypassing status checks.

        p1/p2 default to the current dirstate parents; the dirstate is
        only updated when p1 matches the current first parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wfile(f).read()
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file not readable in the working dir: remove it from
                # the new manifest instead
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
887 889
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """Commit changes in the working directory.

        With files, commit exactly those; otherwise commit everything
        reported changed by self.changes().  Returns the new changeset
        node, or None if nothing was committed (nothing changed, hook
        refused, or the user supplied an empty commit message).
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify the explicitly named files by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn("%s not tracked!\n" % f)
        else:
            (c, a, d, u) = self.changes(match = match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) must be committed even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status("nothing changed\n")
            return None

        if not self.hook("precommit"):
            return None

        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wfile(f).read()
            except IOError:
                self.ui.warn("trouble committing %s!\n" % f)
                raise

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and its revision as metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))

            r = self.file(f)
            fp1 = m1.get(f, nullid)
            fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # no message supplied: build a template and run the editor
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                # empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
1010 1012
1011 1013 def walk(self, node = None, files = [], match = util.always):
1012 1014 if node:
1013 1015 for fn in self.manifest.read(self.changelog.read(node)[0]):
1014 1016 if match(fn): yield 'm', fn
1015 1017 else:
1016 1018 for src, fn in self.dirstate.walk(files, match):
1017 1019 yield src, fn
1018 1020
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Return (changed, added, deleted, unknown) between two states.

        node1 defaults to the working directory's parent; node2 defaults
        to the working directory itself.  unknown is only populated when
        the working directory is involved.
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare working-dir contents against the committed revision
            t1 = self.wfile(fn).read()
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest restricted to names accepted by match
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            # mark dirty/added files with a sentinel "" node so the
            # manifest comparison below falls back to content compare
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever is left in mf1 exists only on the node1 side
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
1089 1091
1090 1092 def add(self, list):
1091 1093 for f in list:
1092 1094 p = self.wjoin(f)
1093 1095 if not os.path.exists(p):
1094 1096 self.ui.warn("%s does not exist!\n" % f)
1095 1097 elif not os.path.isfile(p):
1096 1098 self.ui.warn("%s not added: only files supported currently\n" % f)
1097 1099 elif self.dirstate.state(f) in 'an':
1098 1100 self.ui.warn("%s already tracked!\n" % f)
1099 1101 else:
1100 1102 self.dirstate.update([f], "a")
1101 1103
1102 1104 def forget(self, list):
1103 1105 for f in list:
1104 1106 if self.dirstate.state(f) not in 'ai':
1105 1107 self.ui.warn("%s not added!\n" % f)
1106 1108 else:
1107 1109 self.dirstate.forget([f])
1108 1110
1109 1111 def remove(self, list):
1110 1112 for f in list:
1111 1113 p = self.wjoin(f)
1112 1114 if os.path.exists(p):
1113 1115 self.ui.warn("%s still exists!\n" % f)
1114 1116 elif self.dirstate.state(f) == 'a':
1115 1117 self.ui.warn("%s never committed!\n" % f)
1116 1118 self.dirstate.forget([f])
1117 1119 elif f not in self.dirstate:
1118 1120 self.ui.warn("%s not tracked!\n" % f)
1119 1121 else:
1120 1122 self.dirstate.update([f], "r")
1121 1123
1122 1124 def copy(self, source, dest):
1123 1125 p = self.wjoin(dest)
1124 1126 if not os.path.exists(p):
1125 1127 self.ui.warn("%s does not exist!\n" % dest)
1126 1128 elif not os.path.isfile(p):
1127 1129 self.ui.warn("copy failed: %s is not a file\n" % dest)
1128 1130 else:
1129 1131 if self.dirstate.state(dest) == '?':
1130 1132 self.dirstate.update([dest], "a")
1131 1133 self.dirstate.copy(source, dest)
1132 1134
    def heads(self):
        """Return the list of changelog head nodes."""
        return self.changelog.heads()
1135 1137
1136 1138 # branchlookup returns a dict giving a list of branches for
1137 1139 # each head. A branch is defined as the tag of a node or
1138 1140 # the branch of the node's parents. If a node has multiple
1139 1141 # branch tags, tags are eliminated if they are visible from other
1140 1142 # branch tags.
1141 1143 #
1142 1144 # So, for this graph: a->b->c->d->e
1143 1145 # \ /
1144 1146 # aa -----/
1145 1147 # a has tag 2.6.12
1146 1148 # d has tag 2.6.13
1147 1149 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1148 1150 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1149 1151 # from the list.
1150 1152 #
1151 1153 # It is possible that more than one head will have the same branch tag.
1152 1154 # callers need to check the result for multiple heads under the same
1153 1155 # branch tag if that is a problem for them (ie checkout of a specific
1154 1156 # branch).
1155 1157 #
1156 1158 # passing in a specific branch will limit the depth of the search
1157 1159 # through the parents. It won't limit the branches returned in the
1158 1160 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags.

        See the comment block above for the definition of a branch tag
        and the elimination rule. heads defaults to self.heads();
        passing branch stops the walk early once that tag is reached.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a queued second-parent walk, keeping the tags
                # already found on the path that reached the merge
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # tag found so far on this walk (and from itself)
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized transitive closure over the branches dict
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1241 1243
1242 1244 def branches(self, nodes):
1243 1245 if not nodes: nodes = [self.changelog.tip()]
1244 1246 b = []
1245 1247 for n in nodes:
1246 1248 t = n
1247 1249 while n:
1248 1250 p = self.changelog.parents(n)
1249 1251 if p[1] != nullid or p[0] == nullid:
1250 1252 b.append((t, n, p[0], p[1]))
1251 1253 break
1252 1254 n = p[0]
1253 1255 return b
1254 1256
1255 1257 def between(self, pairs):
1256 1258 r = []
1257 1259
1258 1260 for top, bottom in pairs:
1259 1261 n, l, i = top, [], 0
1260 1262 f = 1
1261 1263
1262 1264 while n != bottom:
1263 1265 p = self.changelog.parents(n)[0]
1264 1266 if i == f:
1265 1267 l.append(n)
1266 1268 f = f * 2
1267 1269 n = p
1268 1270 i += 1
1269 1271
1270 1272 r.append(l)
1271 1273
1272 1274 return r
1273 1275
1274 1276 def newer(self, nodes):
1275 1277 m = {}
1276 1278 nl = []
1277 1279 pm = {}
1278 1280 cl = self.changelog
1279 1281 t = l = cl.count()
1280 1282
1281 1283 # find the lowest numbered node
1282 1284 for n in nodes:
1283 1285 l = min(l, cl.rev(n))
1284 1286 m[n] = 1
1285 1287
1286 1288 for i in xrange(l, t):
1287 1289 n = cl.node(i)
1288 1290 if n in m: # explicitly listed
1289 1291 pm[n] = 1
1290 1292 nl.append(n)
1291 1293 continue
1292 1294 for p in cl.parents(n):
1293 1295 if p in pm: # parent listed
1294 1296 pm[n] = 1
1295 1297 nl.append(n)
1296 1298 break
1297 1299
1298 1300 return nl
1299 1301
1300 1302 def findincoming(self, remote, base=None, heads=None):
1301 1303 m = self.changelog.nodemap
1302 1304 search = []
1303 1305 fetch = []
1304 1306 seen = {}
1305 1307 seenbranch = {}
1306 1308 if base == None:
1307 1309 base = {}
1308 1310
1309 1311 # assume we're closer to the tip than the root
1310 1312 # and start by examining the heads
1311 1313 self.ui.status("searching for changes\n")
1312 1314
1313 1315 if not heads:
1314 1316 heads = remote.heads()
1315 1317
1316 1318 unknown = []
1317 1319 for h in heads:
1318 1320 if h not in m:
1319 1321 unknown.append(h)
1320 1322 else:
1321 1323 base[h] = 1
1322 1324
1323 1325 if not unknown:
1324 1326 return None
1325 1327
1326 1328 rep = {}
1327 1329 reqcnt = 0
1328 1330
1329 1331 # search through remote branches
1330 1332 # a 'branch' here is a linear segment of history, with four parts:
1331 1333 # head, root, first parent, second parent
1332 1334 # (a branch always has two parents (or none) by definition)
1333 1335 unknown = remote.branches(unknown)
1334 1336 while unknown:
1335 1337 r = []
1336 1338 while unknown:
1337 1339 n = unknown.pop(0)
1338 1340 if n[0] in seen:
1339 1341 continue
1340 1342
1341 1343 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1342 1344 if n[0] == nullid:
1343 1345 break
1344 1346 if n in seenbranch:
1345 1347 self.ui.debug("branch already found\n")
1346 1348 continue
1347 1349 if n[1] and n[1] in m: # do we know the base?
1348 1350 self.ui.debug("found incomplete branch %s:%s\n"
1349 1351 % (short(n[0]), short(n[1])))
1350 1352 search.append(n) # schedule branch range for scanning
1351 1353 seenbranch[n] = 1
1352 1354 else:
1353 1355 if n[1] not in seen and n[1] not in fetch:
1354 1356 if n[2] in m and n[3] in m:
1355 1357 self.ui.debug("found new changeset %s\n" %
1356 1358 short(n[1]))
1357 1359 fetch.append(n[1]) # earliest unknown
1358 1360 base[n[2]] = 1 # latest known
1359 1361 continue
1360 1362
1361 1363 for a in n[2:4]:
1362 1364 if a not in rep:
1363 1365 r.append(a)
1364 1366 rep[a] = 1
1365 1367
1366 1368 seen[n[0]] = 1
1367 1369
1368 1370 if r:
1369 1371 reqcnt += 1
1370 1372 self.ui.debug("request %d: %s\n" %
1371 1373 (reqcnt, " ".join(map(short, r))))
1372 1374 for p in range(0, len(r), 10):
1373 1375 for b in remote.branches(r[p:p+10]):
1374 1376 self.ui.debug("received %s:%s\n" %
1375 1377 (short(b[0]), short(b[1])))
1376 1378 if b[0] not in m and b[0] not in seen:
1377 1379 unknown.append(b)
1378 1380
1379 1381 # do binary search on the branches we found
1380 1382 while search:
1381 1383 n = search.pop(0)
1382 1384 reqcnt += 1
1383 1385 l = remote.between([(n[0], n[1])])[0]
1384 1386 l.append(n[1])
1385 1387 p = n[0]
1386 1388 f = 1
1387 1389 for i in l:
1388 1390 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1389 1391 if i in m:
1390 1392 if f <= 2:
1391 1393 self.ui.debug("found new branch changeset %s\n" %
1392 1394 short(p))
1393 1395 fetch.append(p)
1394 1396 base[i] = 1
1395 1397 else:
1396 1398 self.ui.debug("narrowed branch search to %s:%s\n"
1397 1399 % (short(p), short(i)))
1398 1400 search.append((p, i))
1399 1401 break
1400 1402 p, f = i, f * 2
1401 1403
1402 1404 # sanity check our fetch list
1403 1405 for f in fetch:
1404 1406 if f in m:
1405 1407 raise RepoError("already have changeset " + short(f[:4]))
1406 1408
1407 1409 if base.keys() == [nullid]:
1408 1410 self.ui.warn("warning: pulling from an unrelated repository!\n")
1409 1411
1410 1412 self.ui.note("adding new changesets starting at " +
1411 1413 " ".join([short(f) for f in fetch]) + "\n")
1412 1414
1413 1415 self.ui.debug("%d total queries\n" % reqcnt)
1414 1416
1415 1417 return fetch
1416 1418
1417 1419 def findoutgoing(self, remote, base=None, heads=None):
1418 1420 if base == None:
1419 1421 base = {}
1420 1422 self.findincoming(remote, base, heads)
1421 1423
1422 1424 remain = dict.fromkeys(self.changelog.nodemap)
1423 1425
1424 1426 # prune everything remote has from the tree
1425 1427 del remain[nullid]
1426 1428 remove = base.keys()
1427 1429 while remove:
1428 1430 n = remove.pop(0)
1429 1431 if n in remain:
1430 1432 del remain[n]
1431 1433 for p in self.changelog.parents(n):
1432 1434 remove.append(p)
1433 1435
1434 1436 # find every node whose parents have been pruned
1435 1437 subset = []
1436 1438 for n in remain:
1437 1439 p1, p2 = self.changelog.parents(n)
1438 1440 if p1 not in remain and p2 not in remain:
1439 1441 subset.append(n)
1440 1442
1441 1443 # this is the set of all roots we have to push
1442 1444 return subset
1443 1445
1444 1446 def pull(self, remote):
1445 1447 lock = self.lock()
1446 1448
1447 1449 # if we have an empty repo, fetch everything
1448 1450 if self.changelog.tip() == nullid:
1449 1451 self.ui.status("requesting all changes\n")
1450 1452 fetch = [nullid]
1451 1453 else:
1452 1454 fetch = self.findincoming(remote)
1453 1455
1454 1456 if not fetch:
1455 1457 self.ui.status("no changes found\n")
1456 1458 return 1
1457 1459
1458 1460 cg = remote.changegroup(fetch)
1459 1461 return self.addchangegroup(cg)
1460 1462
1461 1463 def push(self, remote, force=False):
1462 1464 lock = remote.lock()
1463 1465
1464 1466 base = {}
1465 1467 heads = remote.heads()
1466 1468 inc = self.findincoming(remote, base, heads)
1467 1469 if not force and inc:
1468 1470 self.ui.warn("abort: unsynced remote changes!\n")
1469 1471 self.ui.status("(did you forget to sync? use push -f to force)\n")
1470 1472 return 1
1471 1473
1472 1474 update = self.findoutgoing(remote, base)
1473 1475 if not update:
1474 1476 self.ui.status("no changes found\n")
1475 1477 return 1
1476 1478 elif not force:
1477 1479 if len(heads) < len(self.changelog.heads()):
1478 1480 self.ui.warn("abort: push creates new remote branches!\n")
1479 1481 self.ui.status("(did you forget to merge?" +
1480 1482 " use push -f to force)\n")
1481 1483 return 1
1482 1484
1483 1485 cg = self.changegroup(update)
1484 1486 return remote.addchangegroup(cg)
1485 1487
1486 1488 def changegroup(self, basenodes):
1487 1489 class genread:
1488 1490 def __init__(self, generator):
1489 1491 self.g = generator
1490 1492 self.buf = ""
1491 1493 def fillbuf(self):
1492 1494 self.buf += "".join(self.g)
1493 1495
1494 1496 def read(self, l):
1495 1497 while l > len(self.buf):
1496 1498 try:
1497 1499 self.buf += self.g.next()
1498 1500 except StopIteration:
1499 1501 break
1500 1502 d, self.buf = self.buf[:l], self.buf[l:]
1501 1503 return d
1502 1504
1503 1505 def gengroup():
1504 1506 nodes = self.newer(basenodes)
1505 1507
1506 1508 # construct the link map
1507 1509 linkmap = {}
1508 1510 for n in nodes:
1509 1511 linkmap[self.changelog.rev(n)] = n
1510 1512
1511 1513 # construct a list of all changed files
1512 1514 changed = {}
1513 1515 for n in nodes:
1514 1516 c = self.changelog.read(n)
1515 1517 for f in c[3]:
1516 1518 changed[f] = 1
1517 1519 changed = changed.keys()
1518 1520 changed.sort()
1519 1521
1520 1522 # the changegroup is changesets + manifests + all file revs
1521 1523 revs = [ self.changelog.rev(n) for n in nodes ]
1522 1524
1523 1525 for y in self.changelog.group(linkmap): yield y
1524 1526 for y in self.manifest.group(linkmap): yield y
1525 1527 for f in changed:
1526 1528 yield struct.pack(">l", len(f) + 4) + f
1527 1529 g = self.file(f).group(linkmap)
1528 1530 for y in g:
1529 1531 yield y
1530 1532
1531 1533 yield struct.pack(">l", 0)
1532 1534
1533 1535 return genread(gengroup())
1534 1536
    def addchangegroup(self, source):
        """Read a changegroup from the file-like object source and add
        it to the repository inside a single transaction.

        The stream layout mirrors changegroup(): changelog group,
        manifest group, then length-prefixed (name, group) pairs for
        each changed file. Returns 1 if the changegroup hook fails.
        """

        def getchunk():
            # chunks are length-prefixed with a 4-byte big-endian int
            # that includes the prefix itself; <= 4 means end of group
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            return source.read(l - 4)

        def getgroup():
            # yield chunks until the group terminator
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        changesets = self.changelog.rev(cn) - self.changelog.rev(co)

        # pull off the manifest group
        # NOTE(review): mm and mo are assigned but never used below
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status("adding file changes\n")
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        self.ui.status(("added %d changesets" +
                        " with %d changes to %d files\n")
                       % (changesets, revisions, files))

        tr.close()

        if not self.hook("changegroup"):
            return 1

        return
1595 1597
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to the given changeset node.

        allow: permit a branch-spanning (merge) update
        force: discard local differences and clobber files
        choose: optional predicate restricting which files to touch
        moddirstate: when False, leave the dirstate untouched

        Returns 1 on abort (uncommitted merge, or branch update
        without allow/force).
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        # manifests of local parent (m1), target (m2) and their ancestor (ma)
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        (c, a, d, u) = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wfile(f).read()
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # now look at files present in the target but not the working dir
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wfile(f, "w").write(t)
            except IOError:
                # missing parent directory: create it and retry
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wfile(f, "w").write(t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
1829 1831
    def merge3(self, fn, my, other):
        """perform a 3-way merge in the working directory"""

        def temp(prefix, node):
            # write revision `node` of fn to a temp file, return its name
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            f.write(fl.read(node))
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note("resolving %s\n" % fn)
        self.ui.debug("file %s: other %s ancestor %s\n" %
                      (fn, short(other), short(base)))

        # merge tool: $HGMERGE, then ui config, then the hgmerge script
        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        # NOTE(review): file names are passed to the shell unquoted;
        # names containing spaces or shell metacharacters will break
        r = os.system("%s %s %s %s" % (cmd, a, b, c))
        if r:
            self.ui.warn("merging %s failed!\n" % fn)

        os.unlink(b)
        os.unlink(c)
1859 1861
1860 1862 def verify(self):
1861 1863 filelinkrevs = {}
1862 1864 filenodes = {}
1863 1865 changesets = revisions = files = 0
1864 1866 errors = 0
1865 1867
1866 1868 seen = {}
1867 1869 self.ui.status("checking changesets\n")
1868 1870 for i in range(self.changelog.count()):
1869 1871 changesets += 1
1870 1872 n = self.changelog.node(i)
1871 1873 if n in seen:
1872 1874 self.ui.warn("duplicate changeset at revision %d\n" % i)
1873 1875 errors += 1
1874 1876 seen[n] = 1
1875 1877
1876 1878 for p in self.changelog.parents(n):
1877 1879 if p not in self.changelog.nodemap:
1878 1880 self.ui.warn("changeset %s has unknown parent %s\n" %
1879 1881 (short(n), short(p)))
1880 1882 errors += 1
1881 1883 try:
1882 1884 changes = self.changelog.read(n)
1883 1885 except Exception, inst:
1884 1886 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1885 1887 errors += 1
1886 1888
1887 1889 for f in changes[3]:
1888 1890 filelinkrevs.setdefault(f, []).append(i)
1889 1891
1890 1892 seen = {}
1891 1893 self.ui.status("checking manifests\n")
1892 1894 for i in range(self.manifest.count()):
1893 1895 n = self.manifest.node(i)
1894 1896 if n in seen:
1895 1897 self.ui.warn("duplicate manifest at revision %d\n" % i)
1896 1898 errors += 1
1897 1899 seen[n] = 1
1898 1900
1899 1901 for p in self.manifest.parents(n):
1900 1902 if p not in self.manifest.nodemap:
1901 1903 self.ui.warn("manifest %s has unknown parent %s\n" %
1902 1904 (short(n), short(p)))
1903 1905 errors += 1
1904 1906
1905 1907 try:
1906 1908 delta = mdiff.patchtext(self.manifest.delta(n))
1907 1909 except KeyboardInterrupt:
1908 1910 self.ui.warn("aborted")
1909 1911 sys.exit(0)
1910 1912 except Exception, inst:
1911 1913 self.ui.warn("unpacking manifest %s: %s\n"
1912 1914 % (short(n), inst))
1913 1915 errors += 1
1914 1916
1915 1917 ff = [ l.split('\0') for l in delta.splitlines() ]
1916 1918 for f, fn in ff:
1917 1919 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1918 1920
1919 1921 self.ui.status("crosschecking files in changesets and manifests\n")
1920 1922 for f in filenodes:
1921 1923 if f not in filelinkrevs:
1922 1924 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1923 1925 errors += 1
1924 1926
1925 1927 for f in filelinkrevs:
1926 1928 if f not in filenodes:
1927 1929 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1928 1930 errors += 1
1929 1931
1930 1932 self.ui.status("checking files\n")
1931 1933 ff = filenodes.keys()
1932 1934 ff.sort()
1933 1935 for f in ff:
1934 1936 if f == "/dev/null": continue
1935 1937 files += 1
1936 1938 fl = self.file(f)
1937 1939 nodes = { nullid: 1 }
1938 1940 seen = {}
1939 1941 for i in range(fl.count()):
1940 1942 revisions += 1
1941 1943 n = fl.node(i)
1942 1944
1943 1945 if n in seen:
1944 1946 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1945 1947 errors += 1
1946 1948
1947 1949 if n not in filenodes[f]:
1948 1950 self.ui.warn("%s: %d:%s not in manifests\n"
1949 1951 % (f, i, short(n)))
1950 1952 errors += 1
1951 1953 else:
1952 1954 del filenodes[f][n]
1953 1955
1954 1956 flr = fl.linkrev(n)
1955 1957 if flr not in filelinkrevs[f]:
1956 1958 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1957 1959 % (f, short(n), fl.linkrev(n)))
1958 1960 errors += 1
1959 1961 else:
1960 1962 filelinkrevs[f].remove(flr)
1961 1963
1962 1964 # verify contents
1963 1965 try:
1964 1966 t = fl.read(n)
1965 1967 except Exception, inst:
1966 1968 self.ui.warn("unpacking file %s %s: %s\n"
1967 1969 % (f, short(n), inst))
1968 1970 errors += 1
1969 1971
1970 1972 # verify parents
1971 1973 (p1, p2) = fl.parents(n)
1972 1974 if p1 not in nodes:
1973 1975 self.ui.warn("file %s:%s unknown parent 1 %s" %
1974 1976 (f, short(n), short(p1)))
1975 1977 errors += 1
1976 1978 if p2 not in nodes:
1977 1979 self.ui.warn("file %s:%s unknown parent 2 %s" %
1978 1980 (f, short(n), short(p1)))
1979 1981 errors += 1
1980 1982 nodes[n] = 1
1981 1983
1982 1984 # cross-check
1983 1985 for node in filenodes[f]:
1984 1986 self.ui.warn("node %s in manifests not in %s\n"
1985 1987 % (hex(node), f))
1986 1988 errors += 1
1987 1989
1988 1990 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1989 1991 (files, changesets, revisions))
1990 1992
1991 1993 if errors:
1992 1994 self.ui.warn("%d integrity errors encountered!\n" % errors)
1993 1995 return 1
1994 1996
class remoterepository:
    """Base class for repositories accessed over the wire."""
    def local(self):
        # remote repositories have no local working directory
        return False
1998 2000
1999 2001 class httprepository(remoterepository):
    def __init__(self, ui, path):
        """Set up an HTTP repository at the given URL, configuring an
        urllib2 opener with optional proxy and proxy-auth support from
        the [http_proxy] config section or the environment."""
        # fix missing / after hostname
        s = urlparse.urlsplit(path)
        partial = s[2]
        if not partial: partial = "/"
        self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
        self.ui = ui
        # hosts that should never go through the proxy
        no_list = [ "localhost", "127.0.0.1" ]
        host = ui.config("http_proxy", "host")
        if host is None:
            host = os.environ.get("http_proxy")
        if host and host.startswith('http://'):
            host = host[7:]
        user = ui.config("http_proxy", "user")
        passwd = ui.config("http_proxy", "passwd")
        no = ui.config("http_proxy", "no")
        if no is None:
            no = os.environ.get("no_proxy")
        if no:
            no_list = no_list + no.split(",")

        no_proxy = 0
        for h in no_list:
            if (path.startswith("http://" + h + "/") or
                path.startswith("http://" + h + ":") or
                path == "http://" + h):
                no_proxy = 1

        # Note: urllib2 takes proxy values from the environment and those will
        # take precedence
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if os.environ.has_key(env):
                    del os.environ[env]
            # NOTE(review): deleting a missing key raises KeyError, not
            # OSError; the has_key guard makes this handler likely dead
            except OSError:
                pass

        proxy_handler = urllib2.BaseHandler()
        if host and not no_proxy:
            proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})

        authinfo = None
        if user and passwd:
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            authinfo = urllib2.ProxyBasicAuthHandler(passmgr)

        # install globally: all subsequent urllib2.urlopen calls use this
        opener = urllib2.build_opener(proxy_handler, authinfo)
        urllib2.install_opener(opener)
2049 2051
2050 2052 def dev(self):
2051 2053 return -1
2052 2054
2053 2055 def do_cmd(self, cmd, **args):
2054 2056 self.ui.debug("sending %s command\n" % cmd)
2055 2057 q = {"cmd": cmd}
2056 2058 q.update(args)
2057 2059 qs = urllib.urlencode(q)
2058 2060 cu = "%s?%s" % (self.url, qs)
2059 2061 resp = urllib2.urlopen(cu)
2060 2062 proto = resp.headers['content-type']
2061 2063
2062 2064 # accept old "text/plain" and "application/hg-changegroup" for now
2063 2065 if not proto.startswith('application/mercurial') and \
2064 2066 not proto.startswith('text/plain') and \
2065 2067 not proto.startswith('application/hg-changegroup'):
2066 2068 raise RepoError("'%s' does not appear to be an hg repository"
2067 2069 % self.url)
2068 2070
2069 2071 if proto.startswith('application/mercurial'):
2070 2072 version = proto[22:]
2071 2073 if float(version) > 0.1:
2072 2074 raise RepoError("'%s' uses newer protocol %s" %
2073 2075 (self.url, version))
2074 2076
2075 2077 return resp
2076 2078
2077 2079 def heads(self):
2078 2080 d = self.do_cmd("heads").read()
2079 2081 try:
2080 2082 return map(bin, d[:-1].split(" "))
2081 2083 except:
2082 2084 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2083 2085 raise
2084 2086
2085 2087 def branches(self, nodes):
2086 2088 n = " ".join(map(hex, nodes))
2087 2089 d = self.do_cmd("branches", nodes=n).read()
2088 2090 try:
2089 2091 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2090 2092 return br
2091 2093 except:
2092 2094 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2093 2095 raise
2094 2096
2095 2097 def between(self, pairs):
2096 2098 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2097 2099 d = self.do_cmd("between", pairs=n).read()
2098 2100 try:
2099 2101 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2100 2102 return p
2101 2103 except:
2102 2104 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2103 2105 raise
2104 2106
2105 2107 def changegroup(self, nodes):
2106 2108 n = " ".join(map(hex, nodes))
2107 2109 f = self.do_cmd("changegroup", roots=n)
2108 2110 bytes = 0
2109 2111
2110 2112 class zread:
2111 2113 def __init__(self, f):
2112 2114 self.zd = zlib.decompressobj()
2113 2115 self.f = f
2114 2116 self.buf = ""
2115 2117 def read(self, l):
2116 2118 while l > len(self.buf):
2117 2119 r = self.f.read(4096)
2118 2120 if r:
2119 2121 self.buf += self.zd.decompress(r)
2120 2122 else:
2121 2123 self.buf += self.zd.flush()
2122 2124 break
2123 2125 d, self.buf = self.buf[:l], self.buf[l:]
2124 2126 return d
2125 2127
2126 2128 return zread(f)
2127 2129
class remotelock:
    """Handle for a lock held on a remote repository.

    Dropping the handle (explicitly via release(), or implicitly on
    garbage collection) tells the remote side to unlock.
    """

    def __init__(self, repo):
        self.repo = repo

    def release(self):
        """Unlock the remote repository and forget the reference."""
        self.repo.unlock()
        self.repo = None

    def __del__(self):
        # Safety net: release on collection if the caller forgot to.
        if self.repo:
            self.release()
2137 2139
class sshrepository(remoterepository):
    """Remote repository reached by running `hg serve --stdio` over ssh.

    Commands and responses travel over the spawned process's
    stdin/stdout pipes; stderr is relayed to the user.
    """

    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        # ssh://[user@]host[:port]/path
        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))', path)
        if not m:
            raise RepoError("couldn't parse destination %s" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7)

        # Assemble the ssh argument list: optional user@ prefix and -p port.
        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s") % (args, self.port) or args
        path = self.path or ""

        if not path:
            raise RepoError("no remote repository path specified")

        # ssh and remote hg binaries are overridable via [ui] config.
        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")
        cmd = "%s %s '%s -R %s serve --stdio'"
        cmd = cmd % (sshcmd, args, remotecmd, path)

        # pipeo -> child stdin, pipei <- child stdout, pipee <- child stderr
        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        """Drain any pending stderr output from the child, relaying it
        to the user prefixed with "remote: "."""
        while 1:
            # zero timeout: only consume what is already buffered
            r,w,x = select.select([self.pipee], [], [], 0)
            if not r: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status("remote: ", l)

    def __del__(self):
        # Best-effort teardown: close the pipes and flush remaining
        # stderr; swallow all errors since we may be mid-interpreter-exit.
        try:
            self.pipeo.close()
            self.pipei.close()
            for l in self.pipee:
                self.ui.status("remote: ", l)
            self.pipee.close()
        except:
            pass

    def dev(self):
        # Remote repositories have no local device number.
        return -1

    def do_cmd(self, cmd, **args):
        """Send a wire-protocol command down the pipe.

        Each keyword argument is sent as "<name> <length>\\n<value>".
        Returns the child's stdout pipe for the caller to read.
        """
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """Run a command and return its full length-prefixed response.

        The remote sends a decimal byte count on the first line followed
        by that many bytes of payload.  Raises RepoError on a malformed
        length line.
        """
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        """Take the remote repository lock; returns a remotelock handle."""
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        """Return the list of head nodes advertised by the remote."""
        d = self.call("heads")
        try:
            # space-separated hex nodes with a trailing newline
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        """Ask the remote for branch information on `nodes`."""
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        """Query intermediate nodes for each (top, bottom) pair."""
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        """Request a changegroup rooted at `nodes`; returns the raw
        stdout pipe for streaming."""
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        """Push a changegroup stream to the remote.

        Returns true when the remote reported a non-empty response body
        after the transfer.  Raises RepoError if the push is refused
        up front.
        """
        d = self.call("addchangegroup")
        if d:
            # NOTE(review): "%s" with a comma passes d as an exception
            # arg rather than formatting it -- message will render as a
            # tuple; confirm whether %-interpolation was intended.
            raise RepoError("push refused: %s", d)

        # stream the changegroup in 4k chunks, relaying stderr as we go
        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        # remote replies with a length-prefixed status payload
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
2260 2262
class httpsrepository(httprepository):
    # Identical wire protocol to plain HTTP; urllib2 selects the TLS
    # transport from the https:// scheme in the URL.
    pass
2263 2265
def repository(ui, path=None, create=0):
    """Instantiate the repository class matching the path's URL scheme.

    Plain filesystem paths (and a None path) fall through to a
    localrepository; `create` only applies to the local case.
    """
    if path:
        handlers = (
            ("http://", lambda p: httprepository(ui, p)),
            ("https://", lambda p: httpsrepository(ui, p)),
            ("hg://",
             lambda p: httprepository(ui, p.replace("hg://", "http://"))),
            ("old-http://",
             lambda p: localrepository(ui,
                                       p.replace("old-http://", "http://"))),
            ("ssh://", lambda p: sshrepository(ui, p)),
        )
        for prefix, make in handlers:
            if path.startswith(prefix):
                return make(path)

    return localrepository(ui, path, create)
General Comments 0
You need to be logged in to leave comments. Login now