##// END OF EJS Templates
Fix long-standing excessive file merges...
mpm@selenic.com -
r990:5007e0bd default
parent child Browse files
Show More
@@ -0,0 +1,73 b''
1 #!/bin/sh
2
3 # This test makes sure that we don't mark a file as merged with its ancestor
4 # when we do a merge.
5
6 cat <<'EOF' > merge
7 #!/bin/sh
8 echo merging for `basename $1`
9 EOF
10 chmod +x merge
11
12 echo creating base
13 hg init a
14 cd a
15 echo 1 > foo
16 echo 1 > bar
17 echo 1 > baz
18 echo 1 > quux
19 hg add foo bar baz quux
20 hg commit -m "base" -d "0 0"
21
22 cd ..
23 hg clone a b
24
25 echo creating branch a
26 cd a
27 echo 2a > foo
28 echo 2a > bar
29 hg commit -m "branch a" -d "0 0"
30
31 echo creating branch b
32
33 cd ..
34 cd b
35 echo 2b > foo
36 echo 2b > baz
37 hg commit -m "branch b" -d "0 0"
38
39 echo "we shouldn't have anything but n state here"
40 hg debugstate | cut -b 1-16,35-
41
42 echo merging
43 hg pull ../a
44 env HGMERGE=../merge hg update -vm --debug
45
46 echo 2m > foo
47 echo 2b > baz
48 echo new > quux
49
50 echo "we shouldn't have anything but foo in merge state here"
51 hg debugstate | cut -b 1-16,35- | grep "^m"
52
53 hg ci -m "merge" -d "0 0"
54
55 echo "main: we should have a merge here"
56 hg debugindex .hg/00changelog.i
57
58 echo "foo: we should have a merge here"
59 hg debugindex .hg/data/foo.i
60
61 echo "bar: we shouldn't have a merge here"
62 hg debugindex .hg/data/bar.i
63
64 echo "baz: we shouldn't have a merge here"
65 hg debugindex .hg/data/baz.i
66
67 echo "quux: we shouldn't have a merge here"
68 hg debugindex .hg/data/quux.i
69
70 echo "everything should be clean now"
71 hg status
72
73 hg verify
@@ -0,0 +1,58 b''
1 creating base
2 creating branch a
3 creating branch b
4 we shouldn't have anything but n state here
5 n 644 2 bar
6 n 644 3 baz
7 n 644 3 foo
8 n 644 2 quux
9 merging
10 pulling from ../a
11 searching for changes
12 adding changesets
13 adding manifests
14 adding file changes
15 added 1 changesets with 2 changes to 2 files
16 (run 'hg update' to get a working copy)
17 merging for foo
18 resolving manifests
19 force None allow 1 moddirstate True linear False
20 ancestor a0486579db29 local ef1b4dbe2193 remote 336d8406d617
21 remote bar is newer, get
22 foo versions differ, resolve
23 getting bar
24 merging foo
25 resolving foo
26 file foo: other 33d1fb69067a ancestor b8e02f643373
27 we shouldn't have anything but foo in merge state here
28 m 644 3 foo
29 main: we should have a merge here
30 rev offset length base linkrev nodeid p1 p2
31 0 0 73 0 0 cdca01651b96 000000000000 000000000000
32 1 73 68 1 1 f6718a9cb7f3 cdca01651b96 000000000000
33 2 141 68 2 2 bdd988058d16 cdca01651b96 000000000000
34 3 209 66 3 3 9da9fbd62226 f6718a9cb7f3 bdd988058d16
35 foo: we should have a merge here
36 rev offset length base linkrev nodeid p1 p2
37 0 0 3 0 0 b8e02f643373 000000000000 000000000000
38 1 3 4 1 1 2ffeddde1b65 b8e02f643373 000000000000
39 2 7 4 2 2 33d1fb69067a b8e02f643373 000000000000
40 3 11 4 3 3 aa27919ee430 2ffeddde1b65 33d1fb69067a
41 bar: we shouldn't have a merge here
42 rev offset length base linkrev nodeid p1 p2
43 0 0 3 0 0 b8e02f643373 000000000000 000000000000
44 1 3 4 1 2 33d1fb69067a b8e02f643373 000000000000
45 baz: we shouldn't have a merge here
46 rev offset length base linkrev nodeid p1 p2
47 0 0 3 0 0 b8e02f643373 000000000000 000000000000
48 1 3 4 1 1 2ffeddde1b65 b8e02f643373 000000000000
49 quux: we shouldn't have a merge here
50 rev offset length base linkrev nodeid p1 p2
51 0 0 3 0 0 b8e02f643373 000000000000 000000000000
52 1 3 5 1 3 6128c0f33108 b8e02f643373 000000000000
53 everything should be clean now
54 checking changesets
55 checking manifests
56 crosschecking files in changesets and manifests
57 checking files
58 4 files, 4 changesets, 10 total revisions
@@ -1,2230 +1,2268 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 14 demandload(globals(), "bisect errno select stat")
15 15
16 16 class filelog(revlog):
17 17 def __init__(self, opener, path):
18 18 revlog.__init__(self, opener,
19 19 os.path.join("data", self.encodedir(path + ".i")),
20 20 os.path.join("data", self.encodedir(path + ".d")))
21 21
22 22 # This avoids a collision between a file named foo and a dir named
23 23 # foo.i or foo.d
24 24 def encodedir(self, path):
25 25 return (path
26 26 .replace(".hg/", ".hg.hg/")
27 27 .replace(".i/", ".i.hg/")
28 28 .replace(".d/", ".d.hg/"))
29 29
30 30 def decodedir(self, path):
31 31 return (path
32 32 .replace(".d.hg/", ".d/")
33 33 .replace(".i.hg/", ".i/")
34 34 .replace(".hg.hg/", ".hg/"))
35 35
36 36 def read(self, node):
37 37 t = self.revision(node)
38 38 if not t.startswith('\1\n'):
39 39 return t
40 40 s = t.find('\1\n', 2)
41 41 return t[s+2:]
42 42
43 43 def readmeta(self, node):
44 44 t = self.revision(node)
45 45 if not t.startswith('\1\n'):
46 46 return t
47 47 s = t.find('\1\n', 2)
48 48 mt = t[2:s]
49 49 for l in mt.splitlines():
50 50 k, v = l.split(": ", 1)
51 51 m[k] = v
52 52 return m
53 53
54 54 def add(self, text, meta, transaction, link, p1=None, p2=None):
55 55 if meta or text.startswith('\1\n'):
56 56 mt = ""
57 57 if meta:
58 58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
59 59 text = "\1\n" + "".join(mt) + "\1\n" + text
60 60 return self.addrevision(text, transaction, link, p1, p2)
61 61
62 62 def annotate(self, node):
63 63
64 64 def decorate(text, rev):
65 65 return ([rev] * len(text.splitlines()), text)
66 66
67 67 def pair(parent, child):
68 68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
69 69 child[0][b1:b2] = parent[0][a1:a2]
70 70 return child
71 71
72 72 # find all ancestors
73 73 needed = {node:1}
74 74 visit = [node]
75 75 while visit:
76 76 n = visit.pop(0)
77 77 for p in self.parents(n):
78 78 if p not in needed:
79 79 needed[p] = 1
80 80 visit.append(p)
81 81 else:
82 82 # count how many times we'll use this
83 83 needed[p] += 1
84 84
85 85 # sort by revision which is a topological order
86 86 visit = [ (self.rev(n), n) for n in needed.keys() ]
87 87 visit.sort()
88 88 hist = {}
89 89
90 90 for r,n in visit:
91 91 curr = decorate(self.read(n), self.linkrev(n))
92 92 for p in self.parents(n):
93 93 if p != nullid:
94 94 curr = pair(hist[p], curr)
95 95 # trim the history of unneeded revs
96 96 needed[p] -= 1
97 97 if not needed[p]:
98 98 del hist[p]
99 99 hist[n] = curr
100 100
101 101 return zip(hist[n][0], hist[n][1].splitlines(1))
102 102
103 103 class manifest(revlog):
104 104 def __init__(self, opener):
105 105 self.mapcache = None
106 106 self.listcache = None
107 107 self.addlist = None
108 108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
109 109
110 110 def read(self, node):
111 111 if node == nullid: return {} # don't upset local cache
112 112 if self.mapcache and self.mapcache[0] == node:
113 113 return self.mapcache[1]
114 114 text = self.revision(node)
115 115 map = {}
116 116 flag = {}
117 117 self.listcache = (text, text.splitlines(1))
118 118 for l in self.listcache[1]:
119 119 (f, n) = l.split('\0')
120 120 map[f] = bin(n[:40])
121 121 flag[f] = (n[40:-1] == "x")
122 122 self.mapcache = (node, map, flag)
123 123 return map
124 124
125 125 def readflags(self, node):
126 126 if node == nullid: return {} # don't upset local cache
127 127 if not self.mapcache or self.mapcache[0] != node:
128 128 self.read(node)
129 129 return self.mapcache[2]
130 130
131 131 def diff(self, a, b):
132 132 # this is sneaky, as we're not actually using a and b
133 133 if self.listcache and self.addlist and self.listcache[0] == a:
134 134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
135 135 if mdiff.patch(a, d) != b:
136 136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
137 137 return mdiff.textdiff(a, b)
138 138 return d
139 139 else:
140 140 return mdiff.textdiff(a, b)
141 141
142 142 def add(self, map, flags, transaction, link, p1=None, p2=None,
143 143 changed=None):
144 144 # directly generate the mdiff delta from the data collected during
145 145 # the bisect loop below
146 146 def gendelta(delta):
147 147 i = 0
148 148 result = []
149 149 while i < len(delta):
150 150 start = delta[i][2]
151 151 end = delta[i][3]
152 152 l = delta[i][4]
153 153 if l == None:
154 154 l = ""
155 155 while i < len(delta) - 1 and start <= delta[i+1][2] \
156 156 and end >= delta[i+1][2]:
157 157 if delta[i+1][3] > end:
158 158 end = delta[i+1][3]
159 159 if delta[i+1][4]:
160 160 l += delta[i+1][4]
161 161 i += 1
162 162 result.append(struct.pack(">lll", start, end, len(l)) + l)
163 163 i += 1
164 164 return result
165 165
166 166 # apply the changes collected during the bisect loop to our addlist
167 167 def addlistdelta(addlist, delta):
168 168 # apply the deltas to the addlist. start from the bottom up
169 169 # so changes to the offsets don't mess things up.
170 170 i = len(delta)
171 171 while i > 0:
172 172 i -= 1
173 173 start = delta[i][0]
174 174 end = delta[i][1]
175 175 if delta[i][4]:
176 176 addlist[start:end] = [delta[i][4]]
177 177 else:
178 178 del addlist[start:end]
179 179 return addlist
180 180
181 181 # calculate the byte offset of the start of each line in the
182 182 # manifest
183 183 def calcoffsets(addlist):
184 184 offsets = [0] * (len(addlist) + 1)
185 185 offset = 0
186 186 i = 0
187 187 while i < len(addlist):
188 188 offsets[i] = offset
189 189 offset += len(addlist[i])
190 190 i += 1
191 191 offsets[i] = offset
192 192 return offsets
193 193
194 194 # if we're using the listcache, make sure it is valid and
195 195 # parented by the same node we're diffing against
196 196 if not changed or not self.listcache or not p1 or \
197 197 self.mapcache[0] != p1:
198 198 files = map.keys()
199 199 files.sort()
200 200
201 201 self.addlist = ["%s\000%s%s\n" %
202 202 (f, hex(map[f]), flags[f] and "x" or '')
203 203 for f in files]
204 204 cachedelta = None
205 205 else:
206 206 addlist = self.listcache[1]
207 207
208 208 # find the starting offset for each line in the add list
209 209 offsets = calcoffsets(addlist)
210 210
211 211 # combine the changed lists into one list for sorting
212 212 work = [[x, 0] for x in changed[0]]
213 213 work[len(work):] = [[x, 1] for x in changed[1]]
214 214 work.sort()
215 215
216 216 delta = []
217 217 bs = 0
218 218
219 219 for w in work:
220 220 f = w[0]
221 221 # bs will either be the index of the item or the insert point
222 222 bs = bisect.bisect(addlist, f, bs)
223 223 if bs < len(addlist):
224 224 fn = addlist[bs][:addlist[bs].index('\0')]
225 225 else:
226 226 fn = None
227 227 if w[1] == 0:
228 228 l = "%s\000%s%s\n" % (f, hex(map[f]),
229 229 flags[f] and "x" or '')
230 230 else:
231 231 l = None
232 232 start = bs
233 233 if fn != f:
234 234 # item not found, insert a new one
235 235 end = bs
236 236 if w[1] == 1:
237 237 sys.stderr.write("failed to remove %s from manifest\n"
238 238 % f)
239 239 sys.exit(1)
240 240 else:
241 241 # item is found, replace/delete the existing line
242 242 end = bs + 1
243 243 delta.append([start, end, offsets[start], offsets[end], l])
244 244
245 245 self.addlist = addlistdelta(addlist, delta)
246 246 if self.mapcache[0] == self.tip():
247 247 cachedelta = "".join(gendelta(delta))
248 248 else:
249 249 cachedelta = None
250 250
251 251 text = "".join(self.addlist)
252 252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
253 253 sys.stderr.write("manifest delta failure\n")
254 254 sys.exit(1)
255 255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
256 256 self.mapcache = (n, map, flags)
257 257 self.listcache = (text, self.addlist)
258 258 self.addlist = None
259 259
260 260 return n
261 261
262 262 class changelog(revlog):
263 263 def __init__(self, opener):
264 264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265 265
266 266 def extract(self, text):
267 267 if not text:
268 268 return (nullid, "", "0", [], "")
269 269 last = text.index("\n\n")
270 270 desc = text[last + 2:]
271 271 l = text[:last].splitlines()
272 272 manifest = bin(l[0])
273 273 user = l[1]
274 274 date = l[2]
275 275 files = l[3:]
276 276 return (manifest, user, date, files, desc)
277 277
278 278 def read(self, node):
279 279 return self.extract(self.revision(node))
280 280
281 281 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
282 282 user=None, date=None):
283 283 if not date:
284 284 if time.daylight: offset = time.altzone
285 285 else: offset = time.timezone
286 286 date = "%d %d" % (time.time(), offset)
287 287 list.sort()
288 288 l = [hex(manifest), user, date] + list + ["", desc]
289 289 text = "\n".join(l)
290 290 return self.addrevision(text, transaction, self.count(), p1, p2)
291 291
292 292 class dirstate:
293 293 def __init__(self, opener, ui, root):
294 294 self.opener = opener
295 295 self.root = root
296 296 self.dirty = 0
297 297 self.ui = ui
298 298 self.map = None
299 299 self.pl = None
300 300 self.copies = {}
301 301 self.ignorefunc = None
302 302
303 303 def wjoin(self, f):
304 304 return os.path.join(self.root, f)
305 305
306 306 def getcwd(self):
307 307 cwd = os.getcwd()
308 308 if cwd == self.root: return ''
309 309 return cwd[len(self.root) + 1:]
310 310
311 311 def ignore(self, f):
312 312 if not self.ignorefunc:
313 313 bigpat = []
314 314 try:
315 315 l = file(self.wjoin(".hgignore"))
316 316 for pat in l:
317 317 p = pat.rstrip()
318 318 if p:
319 319 try:
320 320 re.compile(p)
321 321 except:
322 322 self.ui.warn("ignoring invalid ignore"
323 323 + " regular expression '%s'\n" % p)
324 324 else:
325 325 bigpat.append(p)
326 326 except IOError: pass
327 327
328 328 if bigpat:
329 329 s = "(?:%s)" % (")|(?:".join(bigpat))
330 330 r = re.compile(s)
331 331 self.ignorefunc = r.search
332 332 else:
333 333 self.ignorefunc = util.never
334 334
335 335 return self.ignorefunc(f)
336 336
337 337 def __del__(self):
338 338 if self.dirty:
339 339 self.write()
340 340
341 341 def __getitem__(self, key):
342 342 try:
343 343 return self.map[key]
344 344 except TypeError:
345 345 self.read()
346 346 return self[key]
347 347
348 348 def __contains__(self, key):
349 349 if not self.map: self.read()
350 350 return key in self.map
351 351
352 352 def parents(self):
353 353 if not self.pl:
354 354 self.read()
355 355 return self.pl
356 356
357 357 def markdirty(self):
358 358 if not self.dirty:
359 359 self.dirty = 1
360 360
361 361 def setparents(self, p1, p2 = nullid):
362 362 self.markdirty()
363 363 self.pl = p1, p2
364 364
365 365 def state(self, key):
366 366 try:
367 367 return self[key][0]
368 368 except KeyError:
369 369 return "?"
370 370
371 371 def read(self):
372 372 if self.map is not None: return self.map
373 373
374 374 self.map = {}
375 375 self.pl = [nullid, nullid]
376 376 try:
377 377 st = self.opener("dirstate").read()
378 378 if not st: return
379 379 except: return
380 380
381 381 self.pl = [st[:20], st[20: 40]]
382 382
383 383 pos = 40
384 384 while pos < len(st):
385 385 e = struct.unpack(">cllll", st[pos:pos+17])
386 386 l = e[4]
387 387 pos += 17
388 388 f = st[pos:pos + l]
389 389 if '\0' in f:
390 390 f, c = f.split('\0')
391 391 self.copies[f] = c
392 392 self.map[f] = e[:4]
393 393 pos += l
394 394
395 395 def copy(self, source, dest):
396 396 self.read()
397 397 self.markdirty()
398 398 self.copies[dest] = source
399 399
400 400 def copied(self, file):
401 401 return self.copies.get(file, None)
402 402
403 403 def update(self, files, state, **kw):
404 404 ''' current states:
405 405 n normal
406 406 m needs merging
407 407 r marked for removal
408 408 a marked for addition'''
409 409
410 410 if not files: return
411 411 self.read()
412 412 self.markdirty()
413 413 for f in files:
414 414 if state == "r":
415 415 self.map[f] = ('r', 0, 0, 0)
416 416 else:
417 417 s = os.stat(os.path.join(self.root, f))
418 418 st_size = kw.get('st_size', s.st_size)
419 419 st_mtime = kw.get('st_mtime', s.st_mtime)
420 420 self.map[f] = (state, s.st_mode, st_size, st_mtime)
421 421
422 422 def forget(self, files):
423 423 if not files: return
424 424 self.read()
425 425 self.markdirty()
426 426 for f in files:
427 427 try:
428 428 del self.map[f]
429 429 except KeyError:
430 430 self.ui.warn("not in dirstate: %s!\n" % f)
431 431 pass
432 432
433 433 def clear(self):
434 434 self.map = {}
435 435 self.markdirty()
436 436
437 437 def write(self):
438 438 st = self.opener("dirstate", "w")
439 439 st.write("".join(self.pl))
440 440 for f, e in self.map.items():
441 441 c = self.copied(f)
442 442 if c:
443 443 f = f + "\0" + c
444 444 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
445 445 st.write(e + f)
446 446 self.dirty = 0
447 447
448 448 def filterfiles(self, files):
449 449 ret = {}
450 450 unknown = []
451 451
452 452 for x in files:
453 453 if x is '.':
454 454 return self.map.copy()
455 455 if x not in self.map:
456 456 unknown.append(x)
457 457 else:
458 458 ret[x] = self.map[x]
459 459
460 460 if not unknown:
461 461 return ret
462 462
463 463 b = self.map.keys()
464 464 b.sort()
465 465 blen = len(b)
466 466
467 467 for x in unknown:
468 468 bs = bisect.bisect(b, x)
469 469 if bs != 0 and b[bs-1] == x:
470 470 ret[x] = self.map[x]
471 471 continue
472 472 while bs < blen:
473 473 s = b[bs]
474 474 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
475 475 ret[s] = self.map[s]
476 476 else:
477 477 break
478 478 bs += 1
479 479 return ret
480 480
481 481 def walk(self, files = None, match = util.always, dc=None):
482 482 self.read()
483 483
484 484 # walk all files by default
485 485 if not files:
486 486 files = [self.root]
487 487 if not dc:
488 488 dc = self.map.copy()
489 489 elif not dc:
490 490 dc = self.filterfiles(files)
491 491
492 492 known = {'.hg': 1}
493 493 def seen(fn):
494 494 if fn in known: return True
495 495 known[fn] = 1
496 496 def traverse():
497 497 for ff in util.unique(files):
498 498 f = os.path.join(self.root, ff)
499 499 try:
500 500 st = os.stat(f)
501 501 except OSError, inst:
502 502 if ff not in dc: self.ui.warn('%s: %s\n' % (
503 503 util.pathto(self.getcwd(), ff),
504 504 inst.strerror))
505 505 continue
506 506 if stat.S_ISDIR(st.st_mode):
507 507 for dir, subdirs, fl in os.walk(f):
508 508 d = dir[len(self.root) + 1:]
509 509 nd = util.normpath(d)
510 510 if nd == '.': nd = ''
511 511 if seen(nd):
512 512 subdirs[:] = []
513 513 continue
514 514 for sd in subdirs:
515 515 ds = os.path.join(nd, sd +'/')
516 516 if self.ignore(ds) or not match(ds):
517 517 subdirs.remove(sd)
518 518 subdirs.sort()
519 519 fl.sort()
520 520 for fn in fl:
521 521 fn = util.pconvert(os.path.join(d, fn))
522 522 yield 'f', fn
523 523 elif stat.S_ISREG(st.st_mode):
524 524 yield 'f', ff
525 525 else:
526 526 kind = 'unknown'
527 527 if stat.S_ISCHR(st.st_mode): kind = 'character device'
528 528 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
529 529 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
530 530 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
531 531 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
532 532 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
533 533 util.pathto(self.getcwd(), ff),
534 534 kind))
535 535
536 536 ks = dc.keys()
537 537 ks.sort()
538 538 for k in ks:
539 539 yield 'm', k
540 540
541 541 # yield only files that match: all in dirstate, others only if
542 542 # not in .hgignore
543 543
544 544 for src, fn in util.unique(traverse()):
545 545 fn = util.normpath(fn)
546 546 if seen(fn): continue
547 547 if fn not in dc and self.ignore(fn):
548 548 continue
549 549 if match(fn):
550 550 yield src, fn
551 551
552 552 def changes(self, files=None, match=util.always):
553 553 self.read()
554 554 if not files:
555 555 dc = self.map.copy()
556 556 else:
557 557 dc = self.filterfiles(files)
558 558 lookup, modified, added, unknown = [], [], [], []
559 559 removed, deleted = [], []
560 560
561 561 for src, fn in self.walk(files, match, dc=dc):
562 562 try:
563 563 s = os.stat(os.path.join(self.root, fn))
564 564 except OSError:
565 565 continue
566 566 if not stat.S_ISREG(s.st_mode):
567 567 continue
568 568 c = dc.get(fn)
569 569 if c:
570 570 del dc[fn]
571 571 if c[0] == 'm':
572 572 modified.append(fn)
573 573 elif c[0] == 'a':
574 574 added.append(fn)
575 575 elif c[0] == 'r':
576 576 unknown.append(fn)
577 577 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
578 578 modified.append(fn)
579 579 elif c[3] != s.st_mtime:
580 580 lookup.append(fn)
581 581 else:
582 582 unknown.append(fn)
583 583
584 584 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
585 585 if c[0] == 'r':
586 586 removed.append(fn)
587 587 else:
588 588 deleted.append(fn)
589 589 return (lookup, modified, added, removed + deleted, unknown)
590 590
591 591 # used to avoid circular references so destructors work
592 592 def opener(base):
593 593 p = base
594 594 def o(path, mode="r"):
595 595 if p.startswith("http://"):
596 596 f = os.path.join(p, urllib.quote(path))
597 597 return httprangereader.httprangereader(f)
598 598
599 599 f = os.path.join(p, path)
600 600
601 601 mode += "b" # for that other OS
602 602
603 603 if mode[0] != "r":
604 604 try:
605 605 s = os.stat(f)
606 606 except OSError:
607 607 d = os.path.dirname(f)
608 608 if not os.path.isdir(d):
609 609 os.makedirs(d)
610 610 else:
611 611 if s.st_nlink > 1:
612 612 file(f + ".tmp", "wb").write(file(f, "rb").read())
613 613 util.rename(f+".tmp", f)
614 614
615 615 return file(f, mode)
616 616
617 617 return o
618 618
619 619 class RepoError(Exception): pass
620 620
621 621 class localrepository:
622 622 def __init__(self, ui, path=None, create=0):
623 623 self.remote = 0
624 624 if path and path.startswith("http://"):
625 625 self.remote = 1
626 626 self.path = path
627 627 else:
628 628 if not path:
629 629 p = os.getcwd()
630 630 while not os.path.isdir(os.path.join(p, ".hg")):
631 631 oldp = p
632 632 p = os.path.dirname(p)
633 633 if p == oldp: raise RepoError("no repo found")
634 634 path = p
635 635 self.path = os.path.join(path, ".hg")
636 636
637 637 if not create and not os.path.isdir(self.path):
638 638 raise RepoError("repository %s not found" % self.path)
639 639
640 640 self.root = os.path.abspath(path)
641 641 self.ui = ui
642 642
643 643 if create:
644 644 os.mkdir(self.path)
645 645 os.mkdir(self.join("data"))
646 646
647 647 self.opener = opener(self.path)
648 648 self.wopener = opener(self.root)
649 649 self.manifest = manifest(self.opener)
650 650 self.changelog = changelog(self.opener)
651 651 self.tagscache = None
652 652 self.nodetagscache = None
653 653
654 654 if not self.remote:
655 655 self.dirstate = dirstate(self.opener, ui, self.root)
656 656 try:
657 657 self.ui.readconfig(self.opener("hgrc"))
658 658 except IOError: pass
659 659
660 660 def hook(self, name, **args):
661 661 s = self.ui.config("hooks", name)
662 662 if s:
663 663 self.ui.note("running hook %s: %s\n" % (name, s))
664 664 old = {}
665 665 for k, v in args.items():
666 666 k = k.upper()
667 667 old[k] = os.environ.get(k, None)
668 668 os.environ[k] = v
669 669
670 670 r = os.system(s)
671 671
672 672 for k, v in old.items():
673 673 if v != None:
674 674 os.environ[k] = v
675 675 else:
676 676 del os.environ[k]
677 677
678 678 if r:
679 679 self.ui.warn("abort: %s hook failed with status %d!\n" %
680 680 (name, r))
681 681 return False
682 682 return True
683 683
684 684 def tags(self):
685 685 '''return a mapping of tag to node'''
686 686 if not self.tagscache:
687 687 self.tagscache = {}
688 688 def addtag(self, k, n):
689 689 try:
690 690 bin_n = bin(n)
691 691 except TypeError:
692 692 bin_n = ''
693 693 self.tagscache[k.strip()] = bin_n
694 694
695 695 try:
696 696 # read each head of the tags file, ending with the tip
697 697 # and add each tag found to the map, with "newer" ones
698 698 # taking precedence
699 699 fl = self.file(".hgtags")
700 700 h = fl.heads()
701 701 h.reverse()
702 702 for r in h:
703 703 for l in fl.revision(r).splitlines():
704 704 if l:
705 705 n, k = l.split(" ", 1)
706 706 addtag(self, k, n)
707 707 except KeyError:
708 708 pass
709 709
710 710 try:
711 711 f = self.opener("localtags")
712 712 for l in f:
713 713 n, k = l.split(" ", 1)
714 714 addtag(self, k, n)
715 715 except IOError:
716 716 pass
717 717
718 718 self.tagscache['tip'] = self.changelog.tip()
719 719
720 720 return self.tagscache
721 721
722 722 def tagslist(self):
723 723 '''return a list of tags ordered by revision'''
724 724 l = []
725 725 for t, n in self.tags().items():
726 726 try:
727 727 r = self.changelog.rev(n)
728 728 except:
729 729 r = -2 # sort to the beginning of the list if unknown
730 730 l.append((r,t,n))
731 731 l.sort()
732 732 return [(t,n) for r,t,n in l]
733 733
734 734 def nodetags(self, node):
735 735 '''return the tags associated with a node'''
736 736 if not self.nodetagscache:
737 737 self.nodetagscache = {}
738 738 for t,n in self.tags().items():
739 739 self.nodetagscache.setdefault(n,[]).append(t)
740 740 return self.nodetagscache.get(node, [])
741 741
742 742 def lookup(self, key):
743 743 try:
744 744 return self.tags()[key]
745 745 except KeyError:
746 746 try:
747 747 return self.changelog.lookup(key)
748 748 except:
749 749 raise RepoError("unknown revision '%s'" % key)
750 750
751 751 def dev(self):
752 752 if self.remote: return -1
753 753 return os.stat(self.path).st_dev
754 754
755 755 def local(self):
756 756 return not self.remote
757 757
758 758 def join(self, f):
759 759 return os.path.join(self.path, f)
760 760
761 761 def wjoin(self, f):
762 762 return os.path.join(self.root, f)
763 763
764 764 def file(self, f):
765 765 if f[0] == '/': f = f[1:]
766 766 return filelog(self.opener, f)
767 767
768 768 def getcwd(self):
769 769 return self.dirstate.getcwd()
770 770
771 771 def wfile(self, f, mode='r'):
772 772 return self.wopener(f, mode)
773 773
774 774 def transaction(self):
775 775 # save dirstate for undo
776 776 try:
777 777 ds = self.opener("dirstate").read()
778 778 except IOError:
779 779 ds = ""
780 780 self.opener("journal.dirstate", "w").write(ds)
781 781
782 782 def after():
783 783 util.rename(self.join("journal"), self.join("undo"))
784 784 util.rename(self.join("journal.dirstate"),
785 785 self.join("undo.dirstate"))
786 786
787 787 return transaction.transaction(self.ui.warn, self.opener,
788 788 self.join("journal"), after)
789 789
790 790 def recover(self):
791 791 lock = self.lock()
792 792 if os.path.exists(self.join("journal")):
793 793 self.ui.status("rolling back interrupted transaction\n")
794 794 return transaction.rollback(self.opener, self.join("journal"))
795 795 else:
796 796 self.ui.warn("no interrupted transaction available\n")
797 797
798 798 def undo(self):
799 799 lock = self.lock()
800 800 if os.path.exists(self.join("undo")):
801 801 self.ui.status("rolling back last transaction\n")
802 802 transaction.rollback(self.opener, self.join("undo"))
803 803 self.dirstate = None
804 804 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
805 805 self.dirstate = dirstate(self.opener, self.ui, self.root)
806 806 else:
807 807 self.ui.warn("no undo information available\n")
808 808
809 809 def lock(self, wait = 1):
810 810 try:
811 811 return lock.lock(self.join("lock"), 0)
812 812 except lock.LockHeld, inst:
813 813 if wait:
814 814 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
815 815 return lock.lock(self.join("lock"), wait)
816 816 raise inst
817 817
818 818 def rawcommit(self, files, text, user, date, p1=None, p2=None):
819 819 orig_parent = self.dirstate.parents()[0] or nullid
820 820 p1 = p1 or self.dirstate.parents()[0] or nullid
821 821 p2 = p2 or self.dirstate.parents()[1] or nullid
822 822 c1 = self.changelog.read(p1)
823 823 c2 = self.changelog.read(p2)
824 824 m1 = self.manifest.read(c1[0])
825 825 mf1 = self.manifest.readflags(c1[0])
826 826 m2 = self.manifest.read(c2[0])
827 827
828 828 if orig_parent == p1:
829 829 update_dirstate = 1
830 830 else:
831 831 update_dirstate = 0
832 832
833 833 tr = self.transaction()
834 834 mm = m1.copy()
835 835 mfm = mf1.copy()
836 836 linkrev = self.changelog.count()
837 837 for f in files:
838 838 try:
839 839 t = self.wfile(f).read()
840 840 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
841 841 r = self.file(f)
842 842 mfm[f] = tm
843 mm[f] = r.add(t, {}, tr, linkrev,
844 m1.get(f, nullid), m2.get(f, nullid))
843
844 fp1 = m1.get(f, nullid)
845 fp2 = m2.get(f, nullid)
846
847 # is the same revision on two branches of a merge?
848 if fp2 == fp1:
849 fp2 = nullid
850
851 if fp2 != nullid:
852 # is one parent an ancestor of the other?
853 fpa = r.ancestor(fp1, fp2)
854 if fpa == fp1:
855 fp1, fp2 = fp2, nullid
856 elif fpa == fp2:
857 fp2 = nullid
858
859 # is the file unmodified from the parent?
860 if t == r.read(fp1):
861 # record the proper existing parent in manifest
862 # no need to add a revision
863 mm[f] = fp1
864 continue
865
866 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
845 867 if update_dirstate:
846 868 self.dirstate.update([f], "n")
847 869 except IOError:
848 870 try:
849 871 del mm[f]
850 872 del mfm[f]
851 873 if update_dirstate:
852 874 self.dirstate.forget([f])
853 875 except:
854 876 # deleted from p2?
855 877 pass
856 878
857 879 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
858 880 user = user or self.ui.username()
859 881 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
860 882 tr.close()
861 883 if update_dirstate:
862 884 self.dirstate.setparents(n, nullid)
863 885
864 886 def commit(self, files = None, text = "", user = None, date = None,
865 887 match = util.always, force=False):
866 888 commit = []
867 889 remove = []
868 890 if files:
869 891 for f in files:
870 892 s = self.dirstate.state(f)
871 893 if s in 'nmai':
872 894 commit.append(f)
873 895 elif s == 'r':
874 896 remove.append(f)
875 897 else:
876 898 self.ui.warn("%s not tracked!\n" % f)
877 899 else:
878 900 (c, a, d, u) = self.changes(match = match)
879 901 commit = c + a
880 902 remove = d
881 903
882 if not commit and not remove and not force:
904 p1, p2 = self.dirstate.parents()
905 c1 = self.changelog.read(p1)
906 c2 = self.changelog.read(p2)
907 m1 = self.manifest.read(c1[0])
908 mf1 = self.manifest.readflags(c1[0])
909 m2 = self.manifest.read(c2[0])
910
911 if not commit and not remove and not force and p2 == nullid:
883 912 self.ui.status("nothing changed\n")
884 913 return None
885 914
886 915 if not self.hook("precommit"):
887 916 return None
888 917
889 p1, p2 = self.dirstate.parents()
890 c1 = self.changelog.read(p1)
891 c2 = self.changelog.read(p2)
892 m1 = self.manifest.read(c1[0])
893 mf1 = self.manifest.readflags(c1[0])
894 m2 = self.manifest.read(c2[0])
895 918 lock = self.lock()
896 919 tr = self.transaction()
897 920
898 921 # check in files
899 922 new = {}
900 923 linkrev = self.changelog.count()
901 924 commit.sort()
902 925 for f in commit:
903 926 self.ui.note(f + "\n")
904 927 try:
905 928 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
906 929 t = self.wfile(f).read()
907 930 except IOError:
908 931 self.ui.warn("trouble committing %s!\n" % f)
909 932 raise
910 933
911 934 meta = {}
912 935 cp = self.dirstate.copied(f)
913 936 if cp:
914 937 meta["copy"] = cp
915 938 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
916 939 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
917 940
918 941 r = self.file(f)
919 942 fp1 = m1.get(f, nullid)
920 943 fp2 = m2.get(f, nullid)
944
945 # is the same revision on two branches of a merge?
946 if fp2 == fp1:
947 fp2 = nullid
948
949 if fp2 != nullid:
950 # is one parent an ancestor of the other?
951 fpa = r.ancestor(fp1, fp2)
952 if fpa == fp1:
953 fp1, fp2 = fp2, nullid
954 elif fpa == fp2:
955 fp2 = nullid
956
957 # is the file unmodified from the parent?
958 if not meta and t == r.read(fp1):
959 # record the proper existing parent in manifest
960 # no need to add a revision
961 new[f] = fp1
962 continue
963
921 964 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
922 965
923 966 # update manifest
924 967 m1.update(new)
925 968 for f in remove:
926 969 if f in m1:
927 970 del m1[f]
928 971 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
929 972 (new, remove))
930 973
931 974 # add changeset
932 975 new = new.keys()
933 976 new.sort()
934 977
935 978 if not text:
936 979 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
937 980 edittext += "".join(["HG: changed %s\n" % f for f in new])
938 981 edittext += "".join(["HG: removed %s\n" % f for f in remove])
939 982 edittext = self.ui.edit(edittext)
940 983 if not edittext.rstrip():
941 984 return None
942 985 text = edittext
943 986
944 987 user = user or self.ui.username()
945 988 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
946 989 tr.close()
947 990
948 991 self.dirstate.setparents(n)
949 992 self.dirstate.update(new, "n")
950 993 self.dirstate.forget(remove)
951 994
952 995 if not self.hook("commit", node=hex(n)):
953 996 return None
954 997 return n
955 998
956 999 def walk(self, node = None, files = [], match = util.always):
957 1000 if node:
958 1001 for fn in self.manifest.read(self.changelog.read(node)[0]):
959 1002 if match(fn): yield 'm', fn
960 1003 else:
961 1004 for src, fn in self.dirstate.walk(files, match):
962 1005 yield src, fn
963 1006
964 1007 def changes(self, node1 = None, node2 = None, files = [],
965 1008 match = util.always):
966 1009 mf2, u = None, []
967 1010
968 1011 def fcmp(fn, mf):
969 1012 t1 = self.wfile(fn).read()
970 1013 t2 = self.file(fn).revision(mf[fn])
971 1014 return cmp(t1, t2)
972 1015
973 1016 def mfmatches(node):
974 1017 mf = dict(self.manifest.read(node))
975 1018 for fn in mf.keys():
976 1019 if not match(fn):
977 1020 del mf[fn]
978 1021 return mf
979 1022
980 1023 # are we comparing the working directory?
981 1024 if not node2:
982 1025 l, c, a, d, u = self.dirstate.changes(files, match)
983 1026
984 1027 # are we comparing working dir against its parent?
985 1028 if not node1:
986 1029 if l:
987 1030 # do a full compare of any files that might have changed
988 1031 change = self.changelog.read(self.dirstate.parents()[0])
989 1032 mf2 = mfmatches(change[0])
990 1033 for f in l:
991 1034 if fcmp(f, mf2):
992 1035 c.append(f)
993 1036
994 1037 for l in c, a, d, u:
995 1038 l.sort()
996 1039
997 1040 return (c, a, d, u)
998 1041
999 1042 # are we comparing working dir against non-tip?
1000 1043 # generate a pseudo-manifest for the working dir
1001 1044 if not node2:
1002 1045 if not mf2:
1003 1046 change = self.changelog.read(self.dirstate.parents()[0])
1004 1047 mf2 = mfmatches(change[0])
1005 1048 for f in a + c + l:
1006 1049 mf2[f] = ""
1007 1050 for f in d:
1008 1051 if f in mf2: del mf2[f]
1009 1052 else:
1010 1053 change = self.changelog.read(node2)
1011 1054 mf2 = mfmatches(change[0])
1012 1055
1013 1056 # flush lists from dirstate before comparing manifests
1014 1057 c, a = [], []
1015 1058
1016 1059 change = self.changelog.read(node1)
1017 1060 mf1 = mfmatches(change[0])
1018 1061
1019 1062 for fn in mf2:
1020 1063 if mf1.has_key(fn):
1021 1064 if mf1[fn] != mf2[fn]:
1022 1065 if mf2[fn] != "" or fcmp(fn, mf1):
1023 1066 c.append(fn)
1024 1067 del mf1[fn]
1025 1068 else:
1026 1069 a.append(fn)
1027 1070
1028 1071 d = mf1.keys()
1029 1072
1030 1073 for l in c, a, d, u:
1031 1074 l.sort()
1032 1075
1033 1076 return (c, a, d, u)
1034 1077
1035 1078 def add(self, list):
1036 1079 for f in list:
1037 1080 p = self.wjoin(f)
1038 1081 if not os.path.exists(p):
1039 1082 self.ui.warn("%s does not exist!\n" % f)
1040 1083 elif not os.path.isfile(p):
1041 1084 self.ui.warn("%s not added: only files supported currently\n" % f)
1042 1085 elif self.dirstate.state(f) in 'an':
1043 1086 self.ui.warn("%s already tracked!\n" % f)
1044 1087 else:
1045 1088 self.dirstate.update([f], "a")
1046 1089
1047 1090 def forget(self, list):
1048 1091 for f in list:
1049 1092 if self.dirstate.state(f) not in 'ai':
1050 1093 self.ui.warn("%s not added!\n" % f)
1051 1094 else:
1052 1095 self.dirstate.forget([f])
1053 1096
1054 1097 def remove(self, list):
1055 1098 for f in list:
1056 1099 p = self.wjoin(f)
1057 1100 if os.path.exists(p):
1058 1101 self.ui.warn("%s still exists!\n" % f)
1059 1102 elif self.dirstate.state(f) == 'a':
1060 1103 self.ui.warn("%s never committed!\n" % f)
1061 1104 self.dirstate.forget([f])
1062 1105 elif f not in self.dirstate:
1063 1106 self.ui.warn("%s not tracked!\n" % f)
1064 1107 else:
1065 1108 self.dirstate.update([f], "r")
1066 1109
1067 1110 def copy(self, source, dest):
1068 1111 p = self.wjoin(dest)
1069 1112 if not os.path.exists(p):
1070 1113 self.ui.warn("%s does not exist!\n" % dest)
1071 1114 elif not os.path.isfile(p):
1072 1115 self.ui.warn("copy failed: %s is not a file\n" % dest)
1073 1116 else:
1074 1117 if self.dirstate.state(dest) == '?':
1075 1118 self.dirstate.update([dest], "a")
1076 1119 self.dirstate.copy(source, dest)
1077 1120
1078 1121 def heads(self):
1079 1122 return self.changelog.heads()
1080 1123
1081 1124 # branchlookup returns a dict giving a list of branches for
1082 1125 # each head. A branch is defined as the tag of a node or
1083 1126 # the branch of the node's parents. If a node has multiple
1084 1127 # branch tags, tags are eliminated if they are visible from other
1085 1128 # branch tags.
1086 1129 #
1087 1130 # So, for this graph: a->b->c->d->e
1088 1131 # \ /
1089 1132 # aa -----/
1090 1133 # a has tag 2.6.12
1091 1134 # d has tag 2.6.13
1092 1135 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1093 1136 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1094 1137 # from the list.
1095 1138 #
1096 1139 # It is possible that more than one head will have the same branch tag.
1097 1140 # callers need to check the result for multiple heads under the same
1098 1141 # branch tag if that is a problem for them (ie checkout of a specific
1099 1142 # branch).
1100 1143 #
1101 1144 # passing in a specific branch will limit the depth of the search
1102 1145 # through the parents. It won't limit the branches returned in the
1103 1146 # result though.
1104 1147 def branchlookup(self, heads=None, branch=None):
1105 1148 if not heads:
1106 1149 heads = self.heads()
1107 1150 headt = [ h for h in heads ]
1108 1151 chlog = self.changelog
1109 1152 branches = {}
1110 1153 merges = []
1111 1154 seenmerge = {}
1112 1155
1113 1156 # traverse the tree once for each head, recording in the branches
1114 1157 # dict which tags are visible from this head. The branches
1115 1158 # dict also records which tags are visible from each tag
1116 1159 # while we traverse.
1117 1160 while headt or merges:
1118 1161 if merges:
1119 1162 n, found = merges.pop()
1120 1163 visit = [n]
1121 1164 else:
1122 1165 h = headt.pop()
1123 1166 visit = [h]
1124 1167 found = [h]
1125 1168 seen = {}
1126 1169 while visit:
1127 1170 n = visit.pop()
1128 1171 if n in seen:
1129 1172 continue
1130 1173 pp = chlog.parents(n)
1131 1174 tags = self.nodetags(n)
1132 1175 if tags:
1133 1176 for x in tags:
1134 1177 if x == 'tip':
1135 1178 continue
1136 1179 for f in found:
1137 1180 branches.setdefault(f, {})[n] = 1
1138 1181 branches.setdefault(n, {})[n] = 1
1139 1182 break
1140 1183 if n not in found:
1141 1184 found.append(n)
1142 1185 if branch in tags:
1143 1186 continue
1144 1187 seen[n] = 1
1145 1188 if pp[1] != nullid and n not in seenmerge:
1146 1189 merges.append((pp[1], [x for x in found]))
1147 1190 seenmerge[n] = 1
1148 1191 if pp[0] != nullid:
1149 1192 visit.append(pp[0])
1150 1193 # traverse the branches dict, eliminating branch tags from each
1151 1194 # head that are visible from another branch tag for that head.
1152 1195 out = {}
1153 1196 viscache = {}
1154 1197 for h in heads:
1155 1198 def visible(node):
1156 1199 if node in viscache:
1157 1200 return viscache[node]
1158 1201 ret = {}
1159 1202 visit = [node]
1160 1203 while visit:
1161 1204 x = visit.pop()
1162 1205 if x in viscache:
1163 1206 ret.update(viscache[x])
1164 1207 elif x not in ret:
1165 1208 ret[x] = 1
1166 1209 if x in branches:
1167 1210 visit[len(visit):] = branches[x].keys()
1168 1211 viscache[node] = ret
1169 1212 return ret
1170 1213 if h not in branches:
1171 1214 continue
1172 1215 # O(n^2), but somewhat limited. This only searches the
1173 1216 # tags visible from a specific head, not all the tags in the
1174 1217 # whole repo.
1175 1218 for b in branches[h]:
1176 1219 vis = False
1177 1220 for bb in branches[h].keys():
1178 1221 if b != bb:
1179 1222 if b in visible(bb):
1180 1223 vis = True
1181 1224 break
1182 1225 if not vis:
1183 1226 l = out.setdefault(h, [])
1184 1227 l[len(l):] = self.nodetags(b)
1185 1228 return out
1186 1229
1187 1230 def branches(self, nodes):
1188 1231 if not nodes: nodes = [self.changelog.tip()]
1189 1232 b = []
1190 1233 for n in nodes:
1191 1234 t = n
1192 1235 while n:
1193 1236 p = self.changelog.parents(n)
1194 1237 if p[1] != nullid or p[0] == nullid:
1195 1238 b.append((t, n, p[0], p[1]))
1196 1239 break
1197 1240 n = p[0]
1198 1241 return b
1199 1242
1200 1243 def between(self, pairs):
1201 1244 r = []
1202 1245
1203 1246 for top, bottom in pairs:
1204 1247 n, l, i = top, [], 0
1205 1248 f = 1
1206 1249
1207 1250 while n != bottom:
1208 1251 p = self.changelog.parents(n)[0]
1209 1252 if i == f:
1210 1253 l.append(n)
1211 1254 f = f * 2
1212 1255 n = p
1213 1256 i += 1
1214 1257
1215 1258 r.append(l)
1216 1259
1217 1260 return r
1218 1261
1219 1262 def newer(self, nodes):
1220 1263 m = {}
1221 1264 nl = []
1222 1265 pm = {}
1223 1266 cl = self.changelog
1224 1267 t = l = cl.count()
1225 1268
1226 1269 # find the lowest numbered node
1227 1270 for n in nodes:
1228 1271 l = min(l, cl.rev(n))
1229 1272 m[n] = 1
1230 1273
1231 1274 for i in xrange(l, t):
1232 1275 n = cl.node(i)
1233 1276 if n in m: # explicitly listed
1234 1277 pm[n] = 1
1235 1278 nl.append(n)
1236 1279 continue
1237 1280 for p in cl.parents(n):
1238 1281 if p in pm: # parent listed
1239 1282 pm[n] = 1
1240 1283 nl.append(n)
1241 1284 break
1242 1285
1243 1286 return nl
1244 1287
1245 1288 def findincoming(self, remote, base=None, heads=None):
1246 1289 m = self.changelog.nodemap
1247 1290 search = []
1248 1291 fetch = []
1249 1292 seen = {}
1250 1293 seenbranch = {}
1251 1294 if base == None:
1252 1295 base = {}
1253 1296
1254 1297 # assume we're closer to the tip than the root
1255 1298 # and start by examining the heads
1256 1299 self.ui.status("searching for changes\n")
1257 1300
1258 1301 if not heads:
1259 1302 heads = remote.heads()
1260 1303
1261 1304 unknown = []
1262 1305 for h in heads:
1263 1306 if h not in m:
1264 1307 unknown.append(h)
1265 1308 else:
1266 1309 base[h] = 1
1267 1310
1268 1311 if not unknown:
1269 1312 return None
1270 1313
1271 1314 rep = {}
1272 1315 reqcnt = 0
1273 1316
1274 1317 # search through remote branches
1275 1318 # a 'branch' here is a linear segment of history, with four parts:
1276 1319 # head, root, first parent, second parent
1277 1320 # (a branch always has two parents (or none) by definition)
1278 1321 unknown = remote.branches(unknown)
1279 1322 while unknown:
1280 1323 r = []
1281 1324 while unknown:
1282 1325 n = unknown.pop(0)
1283 1326 if n[0] in seen:
1284 1327 continue
1285 1328
1286 1329 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1287 1330 if n[0] == nullid:
1288 1331 break
1289 1332 if n in seenbranch:
1290 1333 self.ui.debug("branch already found\n")
1291 1334 continue
1292 1335 if n[1] and n[1] in m: # do we know the base?
1293 1336 self.ui.debug("found incomplete branch %s:%s\n"
1294 1337 % (short(n[0]), short(n[1])))
1295 1338 search.append(n) # schedule branch range for scanning
1296 1339 seenbranch[n] = 1
1297 1340 else:
1298 1341 if n[1] not in seen and n[1] not in fetch:
1299 1342 if n[2] in m and n[3] in m:
1300 1343 self.ui.debug("found new changeset %s\n" %
1301 1344 short(n[1]))
1302 1345 fetch.append(n[1]) # earliest unknown
1303 1346 base[n[2]] = 1 # latest known
1304 1347 continue
1305 1348
1306 1349 for a in n[2:4]:
1307 1350 if a not in rep:
1308 1351 r.append(a)
1309 1352 rep[a] = 1
1310 1353
1311 1354 seen[n[0]] = 1
1312 1355
1313 1356 if r:
1314 1357 reqcnt += 1
1315 1358 self.ui.debug("request %d: %s\n" %
1316 1359 (reqcnt, " ".join(map(short, r))))
1317 1360 for p in range(0, len(r), 10):
1318 1361 for b in remote.branches(r[p:p+10]):
1319 1362 self.ui.debug("received %s:%s\n" %
1320 1363 (short(b[0]), short(b[1])))
1321 1364 if b[0] not in m and b[0] not in seen:
1322 1365 unknown.append(b)
1323 1366
1324 1367 # do binary search on the branches we found
1325 1368 while search:
1326 1369 n = search.pop(0)
1327 1370 reqcnt += 1
1328 1371 l = remote.between([(n[0], n[1])])[0]
1329 1372 l.append(n[1])
1330 1373 p = n[0]
1331 1374 f = 1
1332 1375 for i in l:
1333 1376 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1334 1377 if i in m:
1335 1378 if f <= 2:
1336 1379 self.ui.debug("found new branch changeset %s\n" %
1337 1380 short(p))
1338 1381 fetch.append(p)
1339 1382 base[i] = 1
1340 1383 else:
1341 1384 self.ui.debug("narrowed branch search to %s:%s\n"
1342 1385 % (short(p), short(i)))
1343 1386 search.append((p, i))
1344 1387 break
1345 1388 p, f = i, f * 2
1346 1389
1347 1390 # sanity check our fetch list
1348 1391 for f in fetch:
1349 1392 if f in m:
1350 1393 raise RepoError("already have changeset " + short(f[:4]))
1351 1394
1352 1395 if base.keys() == [nullid]:
1353 1396 self.ui.warn("warning: pulling from an unrelated repository!\n")
1354 1397
1355 1398 self.ui.note("adding new changesets starting at " +
1356 1399 " ".join([short(f) for f in fetch]) + "\n")
1357 1400
1358 1401 self.ui.debug("%d total queries\n" % reqcnt)
1359 1402
1360 1403 return fetch
1361 1404
1362 1405 def findoutgoing(self, remote, base=None, heads=None):
1363 1406 if base == None:
1364 1407 base = {}
1365 1408 self.findincoming(remote, base, heads)
1366 1409
1367 1410 remain = dict.fromkeys(self.changelog.nodemap)
1368 1411
1369 1412 # prune everything remote has from the tree
1370 1413 del remain[nullid]
1371 1414 remove = base.keys()
1372 1415 while remove:
1373 1416 n = remove.pop(0)
1374 1417 if n in remain:
1375 1418 del remain[n]
1376 1419 for p in self.changelog.parents(n):
1377 1420 remove.append(p)
1378 1421
1379 1422 # find every node whose parents have been pruned
1380 1423 subset = []
1381 1424 for n in remain:
1382 1425 p1, p2 = self.changelog.parents(n)
1383 1426 if p1 not in remain and p2 not in remain:
1384 1427 subset.append(n)
1385 1428
1386 1429 # this is the set of all roots we have to push
1387 1430 return subset
1388 1431
1389 1432 def pull(self, remote):
1390 1433 lock = self.lock()
1391 1434
1392 1435 # if we have an empty repo, fetch everything
1393 1436 if self.changelog.tip() == nullid:
1394 1437 self.ui.status("requesting all changes\n")
1395 1438 fetch = [nullid]
1396 1439 else:
1397 1440 fetch = self.findincoming(remote)
1398 1441
1399 1442 if not fetch:
1400 1443 self.ui.status("no changes found\n")
1401 1444 return 1
1402 1445
1403 1446 cg = remote.changegroup(fetch)
1404 1447 return self.addchangegroup(cg)
1405 1448
1406 1449 def push(self, remote, force=False):
1407 1450 lock = remote.lock()
1408 1451
1409 1452 base = {}
1410 1453 heads = remote.heads()
1411 1454 inc = self.findincoming(remote, base, heads)
1412 1455 if not force and inc:
1413 1456 self.ui.warn("abort: unsynced remote changes!\n")
1414 1457 self.ui.status("(did you forget to sync? use push -f to force)\n")
1415 1458 return 1
1416 1459
1417 1460 update = self.findoutgoing(remote, base)
1418 1461 if not update:
1419 1462 self.ui.status("no changes found\n")
1420 1463 return 1
1421 1464 elif not force:
1422 1465 if len(heads) < len(self.changelog.heads()):
1423 1466 self.ui.warn("abort: push creates new remote branches!\n")
1424 1467 self.ui.status("(did you forget to merge?" +
1425 1468 " use push -f to force)\n")
1426 1469 return 1
1427 1470
1428 1471 cg = self.changegroup(update)
1429 1472 return remote.addchangegroup(cg)
1430 1473
1431 1474 def changegroup(self, basenodes):
1432 1475 class genread:
1433 1476 def __init__(self, generator):
1434 1477 self.g = generator
1435 1478 self.buf = ""
1436 1479 def fillbuf(self):
1437 1480 self.buf += "".join(self.g)
1438 1481
1439 1482 def read(self, l):
1440 1483 while l > len(self.buf):
1441 1484 try:
1442 1485 self.buf += self.g.next()
1443 1486 except StopIteration:
1444 1487 break
1445 1488 d, self.buf = self.buf[:l], self.buf[l:]
1446 1489 return d
1447 1490
1448 1491 def gengroup():
1449 1492 nodes = self.newer(basenodes)
1450 1493
1451 1494 # construct the link map
1452 1495 linkmap = {}
1453 1496 for n in nodes:
1454 1497 linkmap[self.changelog.rev(n)] = n
1455 1498
1456 1499 # construct a list of all changed files
1457 1500 changed = {}
1458 1501 for n in nodes:
1459 1502 c = self.changelog.read(n)
1460 1503 for f in c[3]:
1461 1504 changed[f] = 1
1462 1505 changed = changed.keys()
1463 1506 changed.sort()
1464 1507
1465 1508 # the changegroup is changesets + manifests + all file revs
1466 1509 revs = [ self.changelog.rev(n) for n in nodes ]
1467 1510
1468 1511 for y in self.changelog.group(linkmap): yield y
1469 1512 for y in self.manifest.group(linkmap): yield y
1470 1513 for f in changed:
1471 1514 yield struct.pack(">l", len(f) + 4) + f
1472 1515 g = self.file(f).group(linkmap)
1473 1516 for y in g:
1474 1517 yield y
1475 1518
1476 1519 yield struct.pack(">l", 0)
1477 1520
1478 1521 return genread(gengroup())
1479 1522
1480 1523 def addchangegroup(self, source):
1481 1524
1482 1525 def getchunk():
1483 1526 d = source.read(4)
1484 1527 if not d: return ""
1485 1528 l = struct.unpack(">l", d)[0]
1486 1529 if l <= 4: return ""
1487 1530 return source.read(l - 4)
1488 1531
1489 1532 def getgroup():
1490 1533 while 1:
1491 1534 c = getchunk()
1492 1535 if not c: break
1493 1536 yield c
1494 1537
1495 1538 def csmap(x):
1496 1539 self.ui.debug("add changeset %s\n" % short(x))
1497 1540 return self.changelog.count()
1498 1541
1499 1542 def revmap(x):
1500 1543 return self.changelog.rev(x)
1501 1544
1502 1545 if not source: return
1503 1546 changesets = files = revisions = 0
1504 1547
1505 1548 tr = self.transaction()
1506 1549
1507 1550 # pull off the changeset group
1508 1551 self.ui.status("adding changesets\n")
1509 1552 co = self.changelog.tip()
1510 1553 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1511 1554 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1512 1555
1513 1556 # pull off the manifest group
1514 1557 self.ui.status("adding manifests\n")
1515 1558 mm = self.manifest.tip()
1516 1559 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1517 1560
1518 1561 # process the files
1519 1562 self.ui.status("adding file changes\n")
1520 1563 while 1:
1521 1564 f = getchunk()
1522 1565 if not f: break
1523 1566 self.ui.debug("adding %s revisions\n" % f)
1524 1567 fl = self.file(f)
1525 1568 o = fl.count()
1526 1569 n = fl.addgroup(getgroup(), revmap, tr)
1527 1570 revisions += fl.count() - o
1528 1571 files += 1
1529 1572
1530 1573 self.ui.status(("added %d changesets" +
1531 1574 " with %d changes to %d files\n")
1532 1575 % (changesets, revisions, files))
1533 1576
1534 1577 tr.close()
1535 1578
1536 1579 if not self.hook("changegroup"):
1537 1580 return 1
1538 1581
1539 1582 return
1540 1583
1541 1584 def update(self, node, allow=False, force=False, choose=None,
1542 1585 moddirstate=True):
1543 1586 pl = self.dirstate.parents()
1544 1587 if not force and pl[1] != nullid:
1545 1588 self.ui.warn("aborting: outstanding uncommitted merges\n")
1546 1589 return 1
1547 1590
1548 1591 p1, p2 = pl[0], node
1549 1592 pa = self.changelog.ancestor(p1, p2)
1550 1593 m1n = self.changelog.read(p1)[0]
1551 1594 m2n = self.changelog.read(p2)[0]
1552 1595 man = self.manifest.ancestor(m1n, m2n)
1553 1596 m1 = self.manifest.read(m1n)
1554 1597 mf1 = self.manifest.readflags(m1n)
1555 1598 m2 = self.manifest.read(m2n)
1556 1599 mf2 = self.manifest.readflags(m2n)
1557 1600 ma = self.manifest.read(man)
1558 1601 mfa = self.manifest.readflags(man)
1559 1602
1560 1603 (c, a, d, u) = self.changes()
1561 1604
1562 1605 # is this a jump, or a merge? i.e. is there a linear path
1563 1606 # from p1 to p2?
1564 1607 linear_path = (pa == p1 or pa == p2)
1565 1608
1566 1609 # resolve the manifest to determine which files
1567 1610 # we care about merging
1568 1611 self.ui.note("resolving manifests\n")
1569 1612 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1570 1613 (force, allow, moddirstate, linear_path))
1571 1614 self.ui.debug(" ancestor %s local %s remote %s\n" %
1572 1615 (short(man), short(m1n), short(m2n)))
1573 1616
1574 1617 merge = {}
1575 1618 get = {}
1576 1619 remove = []
1577 1620 mark = {}
1578 1621
1579 1622 # construct a working dir manifest
1580 1623 mw = m1.copy()
1581 1624 mfw = mf1.copy()
1582 1625 umap = dict.fromkeys(u)
1583 1626
1584 1627 for f in a + c + u:
1585 1628 mw[f] = ""
1586 1629 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1587 1630
1588 1631 for f in d:
1589 1632 if f in mw: del mw[f]
1590 1633
1591 1634 # If we're jumping between revisions (as opposed to merging),
1592 1635 # and if neither the working directory nor the target rev has
1593 1636 # the file, then we need to remove it from the dirstate, to
1594 1637 # prevent the dirstate from listing the file when it is no
1595 1638 # longer in the manifest.
1596 1639 if moddirstate and linear_path and f not in m2:
1597 1640 self.dirstate.forget((f,))
1598 1641
1599 1642 # Compare manifests
1600 1643 for f, n in mw.iteritems():
1601 1644 if choose and not choose(f): continue
1602 1645 if f in m2:
1603 1646 s = 0
1604 1647
1605 1648 # is the wfile new since m1, and match m2?
1606 1649 if f not in m1:
1607 1650 t1 = self.wfile(f).read()
1608 1651 t2 = self.file(f).revision(m2[f])
1609 1652 if cmp(t1, t2) == 0:
1610 1653 mark[f] = 1
1611 1654 n = m2[f]
1612 1655 del t1, t2
1613 1656
1614 1657 # are files different?
1615 1658 if n != m2[f]:
1616 1659 a = ma.get(f, nullid)
1617 1660 # are both different from the ancestor?
1618 1661 if n != a and m2[f] != a:
1619 1662 self.ui.debug(" %s versions differ, resolve\n" % f)
1620 1663 # merge executable bits
1621 1664 # "if we changed or they changed, change in merge"
1622 1665 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1623 1666 mode = ((a^b) | (a^c)) ^ a
1624 1667 merge[f] = (m1.get(f, nullid), m2[f], mode)
1625 1668 s = 1
1626 1669 # are we clobbering?
1627 1670 # is remote's version newer?
1628 1671 # or are we going back in time?
1629 1672 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1630 1673 self.ui.debug(" remote %s is newer, get\n" % f)
1631 1674 get[f] = m2[f]
1632 1675 s = 1
1633 1676 else:
1634 1677 mark[f] = 1
1635 1678 elif f in umap:
1636 1679 # this unknown file is the same as the checkout
1637 1680 get[f] = m2[f]
1638 1681
1639 1682 if not s and mfw[f] != mf2[f]:
1640 1683 if force:
1641 1684 self.ui.debug(" updating permissions for %s\n" % f)
1642 1685 util.set_exec(self.wjoin(f), mf2[f])
1643 1686 else:
1644 1687 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1645 1688 mode = ((a^b) | (a^c)) ^ a
1646 1689 if mode != b:
1647 1690 self.ui.debug(" updating permissions for %s\n" % f)
1648 1691 util.set_exec(self.wjoin(f), mode)
1649 1692 mark[f] = 1
1650 1693 del m2[f]
1651 1694 elif f in ma:
1652 1695 if n != ma[f]:
1653 1696 r = "d"
1654 1697 if not force and (linear_path or allow):
1655 1698 r = self.ui.prompt(
1656 1699 (" local changed %s which remote deleted\n" % f) +
1657 1700 "(k)eep or (d)elete?", "[kd]", "k")
1658 1701 if r == "d":
1659 1702 remove.append(f)
1660 1703 else:
1661 1704 self.ui.debug("other deleted %s\n" % f)
1662 1705 remove.append(f) # other deleted it
1663 1706 else:
1664 1707 if n == m1.get(f, nullid): # same as parent
1665 1708 if p2 == pa: # going backwards?
1666 1709 self.ui.debug("remote deleted %s\n" % f)
1667 1710 remove.append(f)
1668 1711 else:
1669 1712 self.ui.debug("local created %s, keeping\n" % f)
1670 1713 else:
1671 1714 self.ui.debug("working dir created %s, keeping\n" % f)
1672 1715
1673 1716 for f, n in m2.iteritems():
1674 1717 if choose and not choose(f): continue
1675 1718 if f[0] == "/": continue
1676 1719 if f in ma and n != ma[f]:
1677 1720 r = "k"
1678 1721 if not force and (linear_path or allow):
1679 1722 r = self.ui.prompt(
1680 1723 ("remote changed %s which local deleted\n" % f) +
1681 1724 "(k)eep or (d)elete?", "[kd]", "k")
1682 1725 if r == "k": get[f] = n
1683 1726 elif f not in ma:
1684 1727 self.ui.debug("remote created %s\n" % f)
1685 1728 get[f] = n
1686 1729 else:
1687 1730 if force or p2 == pa: # going backwards?
1688 1731 self.ui.debug("local deleted %s, recreating\n" % f)
1689 1732 get[f] = n
1690 1733 else:
1691 1734 self.ui.debug("local deleted %s\n" % f)
1692 1735
1693 1736 del mw, m1, m2, ma
1694 1737
1695 1738 if force:
1696 1739 for f in merge:
1697 1740 get[f] = merge[f][1]
1698 1741 merge = {}
1699 1742
1700 1743 if linear_path or force:
1701 1744 # we don't need to do any magic, just jump to the new rev
1702 1745 mode = 'n'
1703 1746 p1, p2 = p2, nullid
1704 1747 else:
1705 1748 if not allow:
1706 1749 self.ui.status("this update spans a branch" +
1707 1750 " affecting the following files:\n")
1708 1751 fl = merge.keys() + get.keys()
1709 1752 fl.sort()
1710 1753 for f in fl:
1711 1754 cf = ""
1712 1755 if f in merge: cf = " (resolve)"
1713 1756 self.ui.status(" %s%s\n" % (f, cf))
1714 1757 self.ui.warn("aborting update spanning branches!\n")
1715 1758 self.ui.status("(use update -m to merge across branches" +
1716 1759 " or -C to lose changes)\n")
1717 1760 return 1
1718 # we have to remember what files we needed to get/change
1719 # because any file that's different from either one of its
1720 # parents must be in the changeset
1721 1761 mode = 'm'
1722 if moddirstate:
1723 self.dirstate.update(mark.keys(), "m")
1724 1762
1725 1763 if moddirstate:
1726 1764 self.dirstate.setparents(p1, p2)
1727 1765
1728 1766 # get the files we don't need to change
1729 1767 files = get.keys()
1730 1768 files.sort()
1731 1769 for f in files:
1732 1770 if f[0] == "/": continue
1733 1771 self.ui.note("getting %s\n" % f)
1734 1772 t = self.file(f).read(get[f])
1735 1773 try:
1736 1774 self.wfile(f, "w").write(t)
1737 1775 except IOError:
1738 1776 os.makedirs(os.path.dirname(self.wjoin(f)))
1739 1777 self.wfile(f, "w").write(t)
1740 1778 util.set_exec(self.wjoin(f), mf2[f])
1741 1779 if moddirstate:
1742 self.dirstate.update([f], mode)
1780 self.dirstate.update([f], 'n')
1743 1781
1744 1782 # merge the tricky bits
1745 1783 files = merge.keys()
1746 1784 files.sort()
1747 1785 for f in files:
1748 1786 self.ui.status("merging %s\n" % f)
1749 1787 m, o, flag = merge[f]
1750 1788 self.merge3(f, m, o)
1751 1789 util.set_exec(self.wjoin(f), flag)
1752 1790 if moddirstate:
1753 1791 if mode == 'm':
1754 1792 # only update dirstate on branch merge, otherwise we
1755 1793 # could mark files with changes as unchanged
1756 1794 self.dirstate.update([f], mode)
1757 1795 elif p2 == nullid:
1758 1796 # update dirstate from parent1's manifest
1759 1797 m1n = self.changelog.read(p1)[0]
1760 1798 m1 = self.manifest.read(m1n)
1761 1799 f_len = len(self.file(f).read(m1[f]))
1762 1800 self.dirstate.update([f], mode, st_size=f_len, st_mtime=0)
1763 1801 else:
1764 1802 self.ui.warn("Second parent without branch merge!?\n"
1765 1803 "Dirstate for file %s may be wrong.\n" % f)
1766 1804
1767 1805 remove.sort()
1768 1806 for f in remove:
1769 1807 self.ui.note("removing %s\n" % f)
1770 1808 try:
1771 1809 os.unlink(self.wjoin(f))
1772 1810 except OSError, inst:
1773 1811 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1774 1812 # try removing directories that might now be empty
1775 1813 try: os.removedirs(os.path.dirname(self.wjoin(f)))
1776 1814 except: pass
1777 1815 if moddirstate:
1778 1816 if mode == 'n':
1779 1817 self.dirstate.forget(remove)
1780 1818 else:
1781 1819 self.dirstate.update(remove, 'r')
1782 1820
1783 1821 def merge3(self, fn, my, other):
1784 1822 """perform a 3-way merge in the working directory"""
1785 1823
1786 1824 def temp(prefix, node):
1787 1825 pre = "%s~%s." % (os.path.basename(fn), prefix)
1788 1826 (fd, name) = tempfile.mkstemp("", pre)
1789 1827 f = os.fdopen(fd, "wb")
1790 1828 f.write(fl.revision(node))
1791 1829 f.close()
1792 1830 return name
1793 1831
1794 1832 fl = self.file(fn)
1795 1833 base = fl.ancestor(my, other)
1796 1834 a = self.wjoin(fn)
1797 1835 b = temp("base", base)
1798 1836 c = temp("other", other)
1799 1837
1800 1838 self.ui.note("resolving %s\n" % fn)
1801 1839 self.ui.debug("file %s: other %s ancestor %s\n" %
1802 1840 (fn, short(other), short(base)))
1803 1841
1804 1842 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1805 1843 or "hgmerge")
1806 1844 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1807 1845 if r:
1808 1846 self.ui.warn("merging %s failed!\n" % fn)
1809 1847
1810 1848 os.unlink(b)
1811 1849 os.unlink(c)
1812 1850
1813 1851 def verify(self):
1814 1852 filelinkrevs = {}
1815 1853 filenodes = {}
1816 1854 changesets = revisions = files = 0
1817 1855 errors = 0
1818 1856
1819 1857 seen = {}
1820 1858 self.ui.status("checking changesets\n")
1821 1859 for i in range(self.changelog.count()):
1822 1860 changesets += 1
1823 1861 n = self.changelog.node(i)
1824 1862 if n in seen:
1825 1863 self.ui.warn("duplicate changeset at revision %d\n" % i)
1826 1864 errors += 1
1827 1865 seen[n] = 1
1828 1866
1829 1867 for p in self.changelog.parents(n):
1830 1868 if p not in self.changelog.nodemap:
1831 1869 self.ui.warn("changeset %s has unknown parent %s\n" %
1832 1870 (short(n), short(p)))
1833 1871 errors += 1
1834 1872 try:
1835 1873 changes = self.changelog.read(n)
1836 1874 except Exception, inst:
1837 1875 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1838 1876 errors += 1
1839 1877
1840 1878 for f in changes[3]:
1841 1879 filelinkrevs.setdefault(f, []).append(i)
1842 1880
1843 1881 seen = {}
1844 1882 self.ui.status("checking manifests\n")
1845 1883 for i in range(self.manifest.count()):
1846 1884 n = self.manifest.node(i)
1847 1885 if n in seen:
1848 1886 self.ui.warn("duplicate manifest at revision %d\n" % i)
1849 1887 errors += 1
1850 1888 seen[n] = 1
1851 1889
1852 1890 for p in self.manifest.parents(n):
1853 1891 if p not in self.manifest.nodemap:
1854 1892 self.ui.warn("manifest %s has unknown parent %s\n" %
1855 1893 (short(n), short(p)))
1856 1894 errors += 1
1857 1895
1858 1896 try:
1859 1897 delta = mdiff.patchtext(self.manifest.delta(n))
1860 1898 except KeyboardInterrupt:
1861 1899 self.ui.warn("aborted")
1862 1900 sys.exit(0)
1863 1901 except Exception, inst:
1864 1902 self.ui.warn("unpacking manifest %s: %s\n"
1865 1903 % (short(n), inst))
1866 1904 errors += 1
1867 1905
1868 1906 ff = [ l.split('\0') for l in delta.splitlines() ]
1869 1907 for f, fn in ff:
1870 1908 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1871 1909
1872 1910 self.ui.status("crosschecking files in changesets and manifests\n")
1873 1911 for f in filenodes:
1874 1912 if f not in filelinkrevs:
1875 1913 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1876 1914 errors += 1
1877 1915
1878 1916 for f in filelinkrevs:
1879 1917 if f not in filenodes:
1880 1918 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1881 1919 errors += 1
1882 1920
1883 1921 self.ui.status("checking files\n")
1884 1922 ff = filenodes.keys()
1885 1923 ff.sort()
1886 1924 for f in ff:
1887 1925 if f == "/dev/null": continue
1888 1926 files += 1
1889 1927 fl = self.file(f)
1890 1928 nodes = { nullid: 1 }
1891 1929 seen = {}
1892 1930 for i in range(fl.count()):
1893 1931 revisions += 1
1894 1932 n = fl.node(i)
1895 1933
1896 1934 if n in seen:
1897 1935 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1898 1936 errors += 1
1899 1937
1900 1938 if n not in filenodes[f]:
1901 1939 self.ui.warn("%s: %d:%s not in manifests\n"
1902 1940 % (f, i, short(n)))
1903 1941 errors += 1
1904 1942 else:
1905 1943 del filenodes[f][n]
1906 1944
1907 1945 flr = fl.linkrev(n)
1908 1946 if flr not in filelinkrevs[f]:
1909 1947 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1910 1948 % (f, short(n), fl.linkrev(n)))
1911 1949 errors += 1
1912 1950 else:
1913 1951 filelinkrevs[f].remove(flr)
1914 1952
1915 1953 # verify contents
1916 1954 try:
1917 1955 t = fl.read(n)
1918 1956 except Exception, inst:
1919 1957 self.ui.warn("unpacking file %s %s: %s\n"
1920 1958 % (f, short(n), inst))
1921 1959 errors += 1
1922 1960
1923 1961 # verify parents
1924 1962 (p1, p2) = fl.parents(n)
1925 1963 if p1 not in nodes:
1926 1964 self.ui.warn("file %s:%s unknown parent 1 %s" %
1927 1965 (f, short(n), short(p1)))
1928 1966 errors += 1
1929 1967 if p2 not in nodes:
1930 1968 self.ui.warn("file %s:%s unknown parent 2 %s" %
1931 1969 (f, short(n), short(p1)))
1932 1970 errors += 1
1933 1971 nodes[n] = 1
1934 1972
1935 1973 # cross-check
1936 1974 for node in filenodes[f]:
1937 1975 self.ui.warn("node %s in manifests not in %s\n"
1938 1976 % (hex(node), f))
1939 1977 errors += 1
1940 1978
1941 1979 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1942 1980 (files, changesets, revisions))
1943 1981
1944 1982 if errors:
1945 1983 self.ui.warn("%d integrity errors encountered!\n" % errors)
1946 1984 return 1
1947 1985
class remoterepository:
    """Common base class for repositories reached over a network link."""
    def local(self):
        """A remote repository is, by definition, never local."""
        return False
1951 1989
class httprepository(remoterepository):
    """A repository served over plain HTTP (hg's stateless wire protocol)."""

    def __init__(self, ui, path):
        # fix missing / after hostname
        scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
        if not urlpath:
            urlpath = "/"
        self.url = urlparse.urlunsplit((scheme, netloc, urlpath, '', ''))
        self.ui = ui

        # gather proxy configuration from hgrc, falling back to environment
        no_list = ["localhost", "127.0.0.1"]
        host = ui.config("http_proxy", "host")
        if host is None:
            host = os.environ.get("http_proxy")
        if host and host.startswith('http://'):
            host = host[7:]
        user = ui.config("http_proxy", "user")
        passwd = ui.config("http_proxy", "passwd")
        no = ui.config("http_proxy", "no")
        if no is None:
            no = os.environ.get("no_proxy")
        if no:
            no_list = no_list + no.split(",")

        # decide whether the target host is exempt from proxying
        no_proxy = 0
        for h in no_list:
            if (path.startswith("http://" + h + "/") or
                path.startswith("http://" + h + ":") or
                path == "http://" + h):
                no_proxy = 1

        # Note: urllib2 takes proxy values from the environment and those
        # will take precedence, so scrub them before installing our opener
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if env in os.environ:
                    del os.environ[env]
            except OSError:
                pass

        proxy_handler = urllib2.BaseHandler()
        if host and not no_proxy:
            proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})

        authinfo = None
        if user and passwd:
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            authinfo = urllib2.ProxyBasicAuthHandler(passmgr)

        opener = urllib2.build_opener(proxy_handler, authinfo)
        urllib2.install_opener(opener)

    def dev(self):
        # network repositories have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Issue a protocol command; return the open HTTP response object."""
        self.ui.debug("sending %s command\n" % cmd)
        params = {"cmd": cmd}
        params.update(args)
        cu = "%s?%s" % (self.url, urllib.urlencode(params))
        resp = urllib2.urlopen(cu)
        proto = resp.headers['content-type']

        # accept old "text/plain" and "application/hg-changegroup" for now
        recognized = (proto.startswith('application/mercurial') or
                      proto.startswith('text/plain') or
                      proto.startswith('application/hg-changegroup'))
        if not recognized:
            raise RepoError("'%s' does not appear to be an hg repository"
                            % self.url)

        if proto.startswith('application/mercurial'):
            # the protocol version trails "application/mercurial-"
            version = proto[22:]
            if float(version) > 0.1:
                raise RepoError("'%s' uses newer protocol %s" %
                                (self.url, version))

        return resp

    def heads(self):
        """Return the list of head nodes on the remote side."""
        d = self.do_cmd("heads").read()
        try:
            return map(bin, d[:-1].split(" "))
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def branches(self, nodes):
        """Ask the server for the branch info reachable from *nodes*."""
        n = " ".join(map(hex, nodes))
        d = self.do_cmd("branches", nodes=n).read()
        try:
            return [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def between(self, pairs):
        """Ask the server which nodes lie between each (top, bottom) pair."""
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.do_cmd("between", pairs=n).read()
        try:
            return [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def changegroup(self, nodes):
        """Request a changegroup; return a file-like decompressing reader."""
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)

        class zread:
            # lazily inflate the zlib-compressed stream coming from f
            def __init__(self, f):
                self.zd = zlib.decompressobj()
                self.f = f
                self.buf = ""
            def read(self, l):
                # refill the buffer until we can satisfy the request
                while l > len(self.buf):
                    r = self.f.read(4096)
                    if r:
                        self.buf += self.zd.decompress(r)
                    else:
                        self.buf += self.zd.flush()
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        return zread(f)
2080 2118
class remotelock:
    """Hold a lock on a remote repository, releasing it when dropped."""
    def __init__(self, repo):
        self.repo = repo
    def release(self):
        """Explicitly release the remote lock and sever the reference."""
        self.repo.unlock()
        self.repo = None
    def __del__(self):
        # safety net: release the lock even if the caller forgets to
        if self.repo:
            self.release()
2090 2128
class sshrepository(remoterepository):
    """A repository reached by running 'hg serve --stdio' over ssh."""

    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))', path)
        if not m:
            raise RepoError("couldn't parse destination %s" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7)

        # build the ssh target, optionally carrying user and port
        if self.user:
            args = "%s@%s" % (self.user, self.host)
        else:
            args = self.host
        if self.port:
            args = "%s -p %s" % (args, self.port)

        path = self.path or ""
        if not path:
            raise RepoError("no remote repository path specified")

        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")
        cmd = "%s %s '%s -R %s serve --stdio'" % (sshcmd, args, remotecmd, path)

        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        """Drain any pending output on the remote's stderr pipe."""
        while 1:
            r, w, x = select.select([self.pipee], [], [], 0)
            if not r:
                break
            line = self.pipee.readline()
            if not line:
                break
            self.ui.status("remote: ", line)

    def __del__(self):
        # close the pipes, forwarding any remaining stderr output
        try:
            self.pipeo.close()
            self.pipei.close()
            for line in self.pipee:
                self.ui.status("remote: ", line)
            self.pipee.close()
        except:
            pass

    def dev(self):
        # network repositories have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Write a command plus length-prefixed args; return the stdout pipe."""
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """Run a command and return its length-prefixed response body."""
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        """Return the list of head nodes on the remote side."""
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        """Ask the server for the branch info reachable from *nodes*."""
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            return [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        """Ask the server which nodes lie between each (top, bottom) pair."""
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            return [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        """Request a changegroup stream; the caller reads it off stdin."""
        n = " ".join(map(hex, nodes))
        self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        """Stream a changegroup to the remote; return whether it reported data."""
        d = self.call("addchangegroup")
        if d:
            raise RepoError("push refused: %s", d)

        # pump the changegroup across in chunks, draining stderr as we go
        while 1:
            chunk = cg.read(4096)
            if not chunk:
                break
            self.pipeo.write(chunk)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
2213 2251
class httpsrepository(httprepository):
    """HTTPS variant; the transport layer is handled by the opener, so the
    behavior is identical to httprepository."""
    pass
2216 2254
def repository(ui, path=None, create=0):
    """Instantiate the repository class matching *path*.

    URL schemes select a remote repository flavor; anything else (including
    no path at all) yields a localrepository, honoring *create*.
    """
    if path:
        # (prefix, class, (old, new) scheme rewrite or None) — order matters
        schemes = (
            ("http://", httprepository, None),
            ("https://", httpsrepository, None),
            ("hg://", httprepository, ("hg://", "http://")),
            ("old-http://", localrepository, ("old-http://", "http://")),
            ("ssh://", sshrepository, None),
        )
        for prefix, klass, rewrite in schemes:
            if path.startswith(prefix):
                target = path
                if rewrite:
                    target = target.replace(rewrite[0], rewrite[1])
                return klass(ui, target)

    return localrepository(ui, path, create)
@@ -1,18 +1,18 b''
1 1 pulling from ../B1
2 2 searching for changes
3 3 adding changesets
4 4 adding manifests
5 5 adding file changes
6 6 added 1 changesets with 1 changes to 1 files
7 7 (run 'hg update' to get a working copy)
8 8 bar should remain deleted.
9 6b70e9e451a5a33faad7bbebe627e46b937b7364 644 foo
9 f405ac83a5611071d6b54dd5eb26943b1fdc4460 644 foo
10 10 pulling from ../A2
11 11 searching for changes
12 12 adding changesets
13 13 adding manifests
14 14 adding file changes
15 15 added 1 changesets with 0 changes to 0 files
16 16 (run 'hg update' to get a working copy)
17 17 bar should remain deleted.
18 6b70e9e451a5a33faad7bbebe627e46b937b7364 644 foo
18 f9b0e817f6a48de3564c6b2957687c5e7297c5a0 644 foo
@@ -1,21 +1,21 b''
1 1 pushing to ../a
2 2 searching for changes
3 3 abort: unsynced remote changes!
4 4 (did you forget to sync? use push -f to force)
5 5 pulling from ../a
6 6 searching for changes
7 7 adding changesets
8 8 adding manifests
9 9 adding file changes
10 10 added 1 changesets with 1 changes to 1 files
11 11 (run 'hg update' to get a working copy)
12 12 pushing to ../a
13 13 searching for changes
14 14 abort: push creates new remote branches!
15 15 (did you forget to merge? use push -f to force)
16 16 pushing to ../a
17 17 searching for changes
18 18 adding changesets
19 19 adding manifests
20 20 adding file changes
21 added 2 changesets with 2 changes to 2 files
21 added 2 changesets with 1 changes to 1 files
@@ -1,15 +1,13 b''
1 1 unknown
2 2 acb14030fe0a tip
3 3 acb14030fe0a21b60322c440ad2d20cf7685a376 first
4 4 tip 1:b9154636be938d3d431e75a7c906504a079bfe07
5 5 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
6 6 b9154636be93 tip
7 7 M a
8 8 b9154636be93+ tip
9 9 acb14030fe0a+ first
10 10 acb14030fe0a21b60322c440ad2d20cf7685a376+ first
11 11 M a
12 12 c8edf04160c7 tip
13 c8edf04160c7+b9154636be93+ tip
14 M .hgtags
15 M a
13 c8edf04160c7+b9154636be93 tip
General Comments 0
You need to be logged in to leave comments. Login now