fix some rename/copy bugs...
mpm@selenic.com
r1117:30ab5b8e default
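The hunks below wire copy/rename tracking end to end: hg copy records the copy source in the dirstate, commit turns that record into filelog metadata (and clears it from the dirstate), and hg debugrename reads it back through filelog.renamed(). A rough sketch of that round trip, using only methods that appear in the diffs below (the repo handle and node value are placeholders, not part of this change):

    # call flow exercised by the new test script below, not literal test code
    repo.dirstate.copy("foo", "bar")     # what "hg copy foo bar" records
    repo.dirstate.copied("bar")          # -> "foo" until the next commit clears it
    # commit() stores {"copy": "foo", "copyrev": <hex filenode of foo>} as
    # metadata on bar's new filelog revision and gives that revision null parents
    repo.file("bar").renamed(node)       # -> ("foo", <binary copyrev>) or False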
@@ -0,0 +1,41 @@
1 #!/bin/sh
2
3 hg init
4 echo foo > foo
5 hg add foo
6 hg commit -m1 -d"0 0"
7
8 echo "# should show copy"
9 cp foo bar
10 hg copy foo bar
11 hg debugstate
12
13 echo "# shouldn't show copy"
14 hg commit -m2 -d"0 0"
15 hg debugstate
16
17 echo "# should match"
18 hg debugindex .hg/data/foo.i
19 hg debugrename bar
20
21 echo bleah > foo
22 echo quux > bar
23 hg commit -m3 -d"0 0"
24
25 echo "# should not be renamed"
26 hg debugrename bar
27
28 cp foo bar
29 hg copy foo bar
30 echo "# should show copy"
31 hg debugstate
32 hg commit -m3 -d"0 0"
33
34 echo "# should show no parents for tip"
35 hg debugindex .hg/data/bar.i
36 echo "# should match"
37 hg debugindex .hg/data/foo.i
38 hg debugrename bar
39
40 echo "# should show no copies"
41 hg debugstate
\ No newline at end of file
@@ -0,0 +1,34 @@
1 # should show copy
2 a 644 4 08/28/05 05:00:19 bar
3 n 644 4 08/28/05 05:00:19 foo
4
5 foo -> bar
6 # shouldn't show copy
7 n 644 4 08/28/05 05:00:19 bar
8 n 644 4 08/28/05 05:00:19 foo
9
10 # should match
11 rev offset length base linkrev nodeid p1 p2
12 0 0 5 0 0 2ed2a3912a0b 000000000000 000000000000
13 renamed from foo:2ed2a3912a0b24502043eae84ee4b279c18b90dd
14 # should not be renamed
15 not renamed
16 # should show copy
17 n 644 5 08/28/05 05:00:19 bar
18 n 644 6 08/28/05 05:00:19 foo
19
20 foo -> bar
21 # should show no parents for tip
22 rev offset length base linkrev nodeid p1 p2
23 0 0 69 0 1 6ca237634e1f 000000000000 000000000000
24 1 69 6 1 2 7a1ff8e75f5b 6ca237634e1f 000000000000
25 2 75 82 1 3 243dfe60f3d9 000000000000 000000000000
26 # should match
27 rev offset length base linkrev nodeid p1 p2
28 0 0 5 0 0 2ed2a3912a0b 000000000000 000000000000
29 1 5 7 1 2 dd12c926cf16 2ed2a3912a0b 000000000000
30 renamed from foo:dd12c926cf165e3eb4cf87b084955cb617221c17
31 # should show no copies
32 n 644 6 08/28/05 05:00:19 bar
33 n 644 6 08/28/05 05:00:19 foo
34
@@ -1,312 +1,314 @@
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 import struct, os
11 11 from node import *
12 12 from demandload import *
13 13 demandload(globals(), "time bisect stat util re")
14 14
15 15 class dirstate:
16 16 def __init__(self, opener, ui, root):
17 17 self.opener = opener
18 18 self.root = root
19 19 self.dirty = 0
20 20 self.ui = ui
21 21 self.map = None
22 22 self.pl = None
23 23 self.copies = {}
24 24 self.ignorefunc = None
25 25
26 26 def wjoin(self, f):
27 27 return os.path.join(self.root, f)
28 28
29 29 def getcwd(self):
30 30 cwd = os.getcwd()
31 31 if cwd == self.root: return ''
32 32 return cwd[len(self.root) + 1:]
33 33
34 34 def ignore(self, f):
35 35 if not self.ignorefunc:
36 36 bigpat = []
37 37 try:
38 38 l = file(self.wjoin(".hgignore"))
39 39 for pat in l:
40 40 p = pat.rstrip()
41 41 if p:
42 42 try:
43 43 re.compile(p)
44 44 except:
45 45 self.ui.warn("ignoring invalid ignore"
46 46 + " regular expression '%s'\n" % p)
47 47 else:
48 48 bigpat.append(p)
49 49 except IOError: pass
50 50
51 51 if bigpat:
52 52 s = "(?:%s)" % (")|(?:".join(bigpat))
53 53 r = re.compile(s)
54 54 self.ignorefunc = r.search
55 55 else:
56 56 self.ignorefunc = util.never
57 57
58 58 return self.ignorefunc(f)
59 59
60 60 def __del__(self):
61 61 if self.dirty:
62 62 self.write()
63 63
64 64 def __getitem__(self, key):
65 65 try:
66 66 return self.map[key]
67 67 except TypeError:
68 68 self.read()
69 69 return self[key]
70 70
71 71 def __contains__(self, key):
72 72 if not self.map: self.read()
73 73 return key in self.map
74 74
75 75 def parents(self):
76 76 if not self.pl:
77 77 self.read()
78 78 return self.pl
79 79
80 80 def markdirty(self):
81 81 if not self.dirty:
82 82 self.dirty = 1
83 83
84 84 def setparents(self, p1, p2=nullid):
85 85 self.markdirty()
86 86 self.pl = p1, p2
87 87
88 88 def state(self, key):
89 89 try:
90 90 return self[key][0]
91 91 except KeyError:
92 92 return "?"
93 93
94 94 def read(self):
95 95 if self.map is not None: return self.map
96 96
97 97 self.map = {}
98 98 self.pl = [nullid, nullid]
99 99 try:
100 100 st = self.opener("dirstate").read()
101 101 if not st: return
102 102 except: return
103 103
104 104 self.pl = [st[:20], st[20: 40]]
105 105
106 106 pos = 40
107 107 while pos < len(st):
108 108 e = struct.unpack(">cllll", st[pos:pos+17])
109 109 l = e[4]
110 110 pos += 17
111 111 f = st[pos:pos + l]
112 112 if '\0' in f:
113 113 f, c = f.split('\0')
114 114 self.copies[f] = c
115 115 self.map[f] = e[:4]
116 116 pos += l
117 117
118 118 def copy(self, source, dest):
119 119 self.read()
120 120 self.markdirty()
121 121 self.copies[dest] = source
122 122
123 123 def copied(self, file):
124 124 return self.copies.get(file, None)
125 125
126 126 def update(self, files, state, **kw):
127 127 ''' current states:
128 128 n normal
129 129 m needs merging
130 130 r marked for removal
131 131 a marked for addition'''
132 132
133 133 if not files: return
134 134 self.read()
135 135 self.markdirty()
136 136 for f in files:
137 137 if state == "r":
138 138 self.map[f] = ('r', 0, 0, 0)
139 139 else:
140 140 s = os.stat(os.path.join(self.root, f))
141 141 st_size = kw.get('st_size', s.st_size)
142 142 st_mtime = kw.get('st_mtime', s.st_mtime)
143 143 self.map[f] = (state, s.st_mode, st_size, st_mtime)
144 if self.copies.has_key(f):
145 del self.copies[f]
144 146
145 147 def forget(self, files):
146 148 if not files: return
147 149 self.read()
148 150 self.markdirty()
149 151 for f in files:
150 152 try:
151 153 del self.map[f]
152 154 except KeyError:
153 155 self.ui.warn("not in dirstate: %s!\n" % f)
154 156 pass
155 157
156 158 def clear(self):
157 159 self.map = {}
158 160 self.markdirty()
159 161
160 162 def write(self):
161 163 st = self.opener("dirstate", "w")
162 164 st.write("".join(self.pl))
163 165 for f, e in self.map.items():
164 166 c = self.copied(f)
165 167 if c:
166 168 f = f + "\0" + c
167 169 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
168 170 st.write(e + f)
169 171 self.dirty = 0
170 172
171 173 def filterfiles(self, files):
172 174 ret = {}
173 175 unknown = []
174 176
175 177 for x in files:
176 178 if x is '.':
177 179 return self.map.copy()
178 180 if x not in self.map:
179 181 unknown.append(x)
180 182 else:
181 183 ret[x] = self.map[x]
182 184
183 185 if not unknown:
184 186 return ret
185 187
186 188 b = self.map.keys()
187 189 b.sort()
188 190 blen = len(b)
189 191
190 192 for x in unknown:
191 193 bs = bisect.bisect(b, x)
192 194 if bs != 0 and b[bs-1] == x:
193 195 ret[x] = self.map[x]
194 196 continue
195 197 while bs < blen:
196 198 s = b[bs]
197 199 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
198 200 ret[s] = self.map[s]
199 201 else:
200 202 break
201 203 bs += 1
202 204 return ret
203 205
204 206 def walk(self, files=None, match=util.always, dc=None):
205 207 self.read()
206 208
207 209 # walk all files by default
208 210 if not files:
209 211 files = [self.root]
210 212 if not dc:
211 213 dc = self.map.copy()
212 214 elif not dc:
213 215 dc = self.filterfiles(files)
214 216
215 217 known = {'.hg': 1}
216 218 def seen(fn):
217 219 if fn in known: return True
218 220 known[fn] = 1
219 221 def traverse():
220 222 for ff in util.unique(files):
221 223 f = os.path.join(self.root, ff)
222 224 try:
223 225 st = os.stat(f)
224 226 except OSError, inst:
225 227 if ff not in dc: self.ui.warn('%s: %s\n' % (
226 228 util.pathto(self.getcwd(), ff),
227 229 inst.strerror))
228 230 continue
229 231 if stat.S_ISDIR(st.st_mode):
230 232 for dir, subdirs, fl in os.walk(f):
231 233 d = dir[len(self.root) + 1:]
232 234 nd = util.normpath(d)
233 235 if nd == '.': nd = ''
234 236 if seen(nd):
235 237 subdirs[:] = []
236 238 continue
237 239 for sd in subdirs:
238 240 ds = os.path.join(nd, sd +'/')
239 241 if self.ignore(ds) or not match(ds):
240 242 subdirs.remove(sd)
241 243 subdirs.sort()
242 244 fl.sort()
243 245 for fn in fl:
244 246 fn = util.pconvert(os.path.join(d, fn))
245 247 yield 'f', fn
246 248 elif stat.S_ISREG(st.st_mode):
247 249 yield 'f', ff
248 250 else:
249 251 kind = 'unknown'
250 252 if stat.S_ISCHR(st.st_mode): kind = 'character device'
251 253 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
252 254 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
253 255 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
254 256 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
255 257 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
256 258 util.pathto(self.getcwd(), ff),
257 259 kind))
258 260
259 261 ks = dc.keys()
260 262 ks.sort()
261 263 for k in ks:
262 264 yield 'm', k
263 265
264 266 # yield only files that match: all in dirstate, others only if
265 267 # not in .hgignore
266 268
267 269 for src, fn in util.unique(traverse()):
268 270 fn = util.normpath(fn)
269 271 if seen(fn): continue
270 272 if fn not in dc and self.ignore(fn):
271 273 continue
272 274 if match(fn):
273 275 yield src, fn
274 276
275 277 def changes(self, files=None, match=util.always):
276 278 self.read()
277 279 if not files:
278 280 dc = self.map.copy()
279 281 else:
280 282 dc = self.filterfiles(files)
281 283 lookup, modified, added, unknown = [], [], [], []
282 284 removed, deleted = [], []
283 285
284 286 for src, fn in self.walk(files, match, dc=dc):
285 287 try:
286 288 s = os.stat(os.path.join(self.root, fn))
287 289 except OSError:
288 290 continue
289 291 if not stat.S_ISREG(s.st_mode):
290 292 continue
291 293 c = dc.get(fn)
292 294 if c:
293 295 del dc[fn]
294 296 if c[0] == 'm':
295 297 modified.append(fn)
296 298 elif c[0] == 'a':
297 299 added.append(fn)
298 300 elif c[0] == 'r':
299 301 unknown.append(fn)
300 302 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
301 303 modified.append(fn)
302 304 elif c[3] != s.st_mtime:
303 305 lookup.append(fn)
304 306 else:
305 307 unknown.append(fn)
306 308
307 309 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
308 310 if c[0] == 'r':
309 311 removed.append(fn)
310 312 else:
311 313 deleted.append(fn)
312 314 return (lookup, modified, added, removed + deleted, unknown)
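For reference, the dirstate records that read() and write() above exchange are a 17-byte packed header (state, mode, size, mtime, name length) followed by the file name, with a copy stored by appending "\0" plus the copy source to the destination name. A minimal Python 2 sketch of that encoding; pack_entry and unpack_entry are hypothetical helpers, not part of dirstate.py:

    import struct

    def pack_entry(state, mode, size, mtime, name, copysource=None):
        # mirrors dirstate.write(): copies ride along as "dest\0source"
        if copysource:
            name = name + "\0" + copysource
        hdr = struct.pack(">cllll", state, mode, size, mtime, len(name))
        return hdr + name

    def unpack_entry(data, pos):
        # mirrors dirstate.read(): fixed 17-byte header, then the (possibly
        # copy-annotated) name of the recorded length
        state, mode, size, mtime, l = struct.unpack(">cllll", data[pos:pos + 17])
        name = data[pos + 17:pos + 17 + l]
        copysource = None
        if '\0' in name:
            name, copysource = name.split('\0')
        return (state, mode, size, mtime, name, copysource), pos + 17 + l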
@@ -1,108 +1,107 @@
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os
9 9 from revlog import *
10 10 from demandload import *
11 11 demandload(globals(), "bdiff")
12 12
13 13 class filelog(revlog):
14 14 def __init__(self, opener, path):
15 15 revlog.__init__(self, opener,
16 16 os.path.join("data", self.encodedir(path + ".i")),
17 17 os.path.join("data", self.encodedir(path + ".d")))
18 18
19 19 # This avoids a collision between a file named foo and a dir named
20 20 # foo.i or foo.d
21 21 def encodedir(self, path):
22 22 return (path
23 23 .replace(".hg/", ".hg.hg/")
24 24 .replace(".i/", ".i.hg/")
25 25 .replace(".d/", ".d.hg/"))
26 26
27 27 def decodedir(self, path):
28 28 return (path
29 29 .replace(".d.hg/", ".d/")
30 30 .replace(".i.hg/", ".i/")
31 31 .replace(".hg.hg/", ".hg/"))
32 32
33 33 def read(self, node):
34 34 t = self.revision(node)
35 35 if not t.startswith('\1\n'):
36 36 return t
37 37 s = t.find('\1\n', 2)
38 38 return t[s+2:]
39 39
40 40 def readmeta(self, node):
41 41 t = self.revision(node)
42 42 if not t.startswith('\1\n'):
43 43 return {}
44 44 s = t.find('\1\n', 2)
45 45 mt = t[2:s]
46 46 m = {}
47 47 for l in mt.splitlines():
48 48 k, v = l.split(": ", 1)
49 49 m[k] = v
50 50 return m
51 51
52 52 def add(self, text, meta, transaction, link, p1=None, p2=None):
53 53 if meta or text.startswith('\1\n'):
54 54 mt = ""
55 55 if meta:
56 56 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
57 57 text = "\1\n" + "".join(mt) + "\1\n" + text
58 58 return self.addrevision(text, transaction, link, p1, p2)
59 59
60 60 def renamed(self, node):
61 61 if 0 and self.parents(node)[0] != nullid:
62 print "shortcut"
63 62 return False
64 63 m = self.readmeta(node)
65 64 if m and m.has_key("copy"):
66 65 return (m["copy"], bin(m["copyrev"]))
67 66 return False
68 67
69 68 def annotate(self, node):
70 69
71 70 def decorate(text, rev):
72 71 return ([rev] * len(text.splitlines()), text)
73 72
74 73 def pair(parent, child):
75 74 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
76 75 child[0][b1:b2] = parent[0][a1:a2]
77 76 return child
78 77
79 78 # find all ancestors
80 79 needed = {node:1}
81 80 visit = [node]
82 81 while visit:
83 82 n = visit.pop(0)
84 83 for p in self.parents(n):
85 84 if p not in needed:
86 85 needed[p] = 1
87 86 visit.append(p)
88 87 else:
89 88 # count how many times we'll use this
90 89 needed[p] += 1
91 90
92 91 # sort by revision which is a topological order
93 92 visit = [ (self.rev(n), n) for n in needed.keys() ]
94 93 visit.sort()
95 94 hist = {}
96 95
97 96 for r,n in visit:
98 97 curr = decorate(self.read(n), self.linkrev(n))
99 98 for p in self.parents(n):
100 99 if p != nullid:
101 100 curr = pair(hist[p], curr)
102 101 # trim the history of unneeded revs
103 102 needed[p] -= 1
104 103 if not needed[p]:
105 104 del hist[p]
106 105 hist[n] = curr
107 106
108 107 return zip(hist[n][0], hist[n][1].splitlines(1))
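The copy metadata itself is framed inside the stored revision text: add() prefixes the text with "\1\n", the "key: value" lines, and a second "\1\n", and read()/readmeta() strip or parse that header. A small sketch of the framing; parse_filelog_text is a hypothetical helper that simply combines read() and readmeta() above:

    def parse_filelog_text(t):
        # returns (metadata dict, file text) for a stored filelog revision
        if not t.startswith('\1\n'):
            return {}, t
        s = t.find('\1\n', 2)
        meta = {}
        for l in t[2:s].splitlines():
            k, v = l.split(": ", 1)
            meta[k] = v
        return meta, t[s + 2:]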
@@ -1,1400 +1,1402 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock transaction tempfile stat mdiff")
13 13
14 14 class localrepository:
15 15 def __init__(self, ui, path=None, create=0):
16 16 if not path:
17 17 p = os.getcwd()
18 18 while not os.path.isdir(os.path.join(p, ".hg")):
19 19 oldp = p
20 20 p = os.path.dirname(p)
21 21 if p == oldp: raise repo.RepoError("no repo found")
22 22 path = p
23 23 self.path = os.path.join(path, ".hg")
24 24
25 25 if not create and not os.path.isdir(self.path):
26 26 raise repo.RepoError("repository %s not found" % self.path)
27 27
28 28 self.root = os.path.abspath(path)
29 29 self.ui = ui
30 30
31 31 if create:
32 32 os.mkdir(self.path)
33 33 os.mkdir(self.join("data"))
34 34
35 35 self.opener = util.opener(self.path)
36 36 self.wopener = util.opener(self.root)
37 37 self.manifest = manifest.manifest(self.opener)
38 38 self.changelog = changelog.changelog(self.opener)
39 39 self.tagscache = None
40 40 self.nodetagscache = None
41 41
42 42 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
43 43 try:
44 44 self.ui.readconfig(self.opener("hgrc"))
45 45 except IOError: pass
46 46
47 47 def hook(self, name, **args):
48 48 s = self.ui.config("hooks", name)
49 49 if s:
50 50 self.ui.note("running hook %s: %s\n" % (name, s))
51 51 old = {}
52 52 for k, v in args.items():
53 53 k = k.upper()
54 54 old[k] = os.environ.get(k, None)
55 55 os.environ[k] = v
56 56
57 57 r = os.system(s)
58 58
59 59 for k, v in old.items():
60 60 if v != None:
61 61 os.environ[k] = v
62 62 else:
63 63 del os.environ[k]
64 64
65 65 if r:
66 66 self.ui.warn("abort: %s hook failed with status %d!\n" %
67 67 (name, r))
68 68 return False
69 69 return True
70 70
71 71 def tags(self):
72 72 '''return a mapping of tag to node'''
73 73 if not self.tagscache:
74 74 self.tagscache = {}
75 75 def addtag(self, k, n):
76 76 try:
77 77 bin_n = bin(n)
78 78 except TypeError:
79 79 bin_n = ''
80 80 self.tagscache[k.strip()] = bin_n
81 81
82 82 try:
83 83 # read each head of the tags file, ending with the tip
84 84 # and add each tag found to the map, with "newer" ones
85 85 # taking precedence
86 86 fl = self.file(".hgtags")
87 87 h = fl.heads()
88 88 h.reverse()
89 89 for r in h:
90 90 for l in fl.read(r).splitlines():
91 91 if l:
92 92 n, k = l.split(" ", 1)
93 93 addtag(self, k, n)
94 94 except KeyError:
95 95 pass
96 96
97 97 try:
98 98 f = self.opener("localtags")
99 99 for l in f:
100 100 n, k = l.split(" ", 1)
101 101 addtag(self, k, n)
102 102 except IOError:
103 103 pass
104 104
105 105 self.tagscache['tip'] = self.changelog.tip()
106 106
107 107 return self.tagscache
108 108
109 109 def tagslist(self):
110 110 '''return a list of tags ordered by revision'''
111 111 l = []
112 112 for t, n in self.tags().items():
113 113 try:
114 114 r = self.changelog.rev(n)
115 115 except:
116 116 r = -2 # sort to the beginning of the list if unknown
117 117 l.append((r,t,n))
118 118 l.sort()
119 119 return [(t,n) for r,t,n in l]
120 120
121 121 def nodetags(self, node):
122 122 '''return the tags associated with a node'''
123 123 if not self.nodetagscache:
124 124 self.nodetagscache = {}
125 125 for t,n in self.tags().items():
126 126 self.nodetagscache.setdefault(n,[]).append(t)
127 127 return self.nodetagscache.get(node, [])
128 128
129 129 def lookup(self, key):
130 130 try:
131 131 return self.tags()[key]
132 132 except KeyError:
133 133 try:
134 134 return self.changelog.lookup(key)
135 135 except:
136 136 raise repo.RepoError("unknown revision '%s'" % key)
137 137
138 138 def dev(self):
139 139 return os.stat(self.path).st_dev
140 140
141 141 def local(self):
142 142 return True
143 143
144 144 def join(self, f):
145 145 return os.path.join(self.path, f)
146 146
147 147 def wjoin(self, f):
148 148 return os.path.join(self.root, f)
149 149
150 150 def file(self, f):
151 151 if f[0] == '/': f = f[1:]
152 152 return filelog.filelog(self.opener, f)
153 153
154 154 def getcwd(self):
155 155 return self.dirstate.getcwd()
156 156
157 157 def wfile(self, f, mode='r'):
158 158 return self.wopener(f, mode)
159 159
160 160 def wread(self, filename):
161 161 return self.wopener(filename, 'r').read()
162 162
163 163 def wwrite(self, filename, data, fd=None):
164 164 if fd:
165 165 return fd.write(data)
166 166 return self.wopener(filename, 'w').write(data)
167 167
168 168 def transaction(self):
169 169 # save dirstate for undo
170 170 try:
171 171 ds = self.opener("dirstate").read()
172 172 except IOError:
173 173 ds = ""
174 174 self.opener("journal.dirstate", "w").write(ds)
175 175
176 176 def after():
177 177 util.rename(self.join("journal"), self.join("undo"))
178 178 util.rename(self.join("journal.dirstate"),
179 179 self.join("undo.dirstate"))
180 180
181 181 return transaction.transaction(self.ui.warn, self.opener,
182 182 self.join("journal"), after)
183 183
184 184 def recover(self):
185 185 lock = self.lock()
186 186 if os.path.exists(self.join("journal")):
187 187 self.ui.status("rolling back interrupted transaction\n")
188 188 return transaction.rollback(self.opener, self.join("journal"))
189 189 else:
190 190 self.ui.warn("no interrupted transaction available\n")
191 191
192 192 def undo(self):
193 193 lock = self.lock()
194 194 if os.path.exists(self.join("undo")):
195 195 self.ui.status("rolling back last transaction\n")
196 196 transaction.rollback(self.opener, self.join("undo"))
197 197 self.dirstate = None
198 198 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
199 199 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
200 200 else:
201 201 self.ui.warn("no undo information available\n")
202 202
203 203 def lock(self, wait=1):
204 204 try:
205 205 return lock.lock(self.join("lock"), 0)
206 206 except lock.LockHeld, inst:
207 207 if wait:
208 208 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
209 209 return lock.lock(self.join("lock"), wait)
210 210 raise inst
211 211
212 212 def rawcommit(self, files, text, user, date, p1=None, p2=None):
213 213 orig_parent = self.dirstate.parents()[0] or nullid
214 214 p1 = p1 or self.dirstate.parents()[0] or nullid
215 215 p2 = p2 or self.dirstate.parents()[1] or nullid
216 216 c1 = self.changelog.read(p1)
217 217 c2 = self.changelog.read(p2)
218 218 m1 = self.manifest.read(c1[0])
219 219 mf1 = self.manifest.readflags(c1[0])
220 220 m2 = self.manifest.read(c2[0])
221 221 changed = []
222 222
223 223 if orig_parent == p1:
224 224 update_dirstate = 1
225 225 else:
226 226 update_dirstate = 0
227 227
228 228 tr = self.transaction()
229 229 mm = m1.copy()
230 230 mfm = mf1.copy()
231 231 linkrev = self.changelog.count()
232 232 for f in files:
233 233 try:
234 234 t = self.wread(f)
235 235 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
236 236 r = self.file(f)
237 237 mfm[f] = tm
238 238
239 239 fp1 = m1.get(f, nullid)
240 240 fp2 = m2.get(f, nullid)
241 241
242 242 # is the same revision on two branches of a merge?
243 243 if fp2 == fp1:
244 244 fp2 = nullid
245 245
246 246 if fp2 != nullid:
247 247 # is one parent an ancestor of the other?
248 248 fpa = r.ancestor(fp1, fp2)
249 249 if fpa == fp1:
250 250 fp1, fp2 = fp2, nullid
251 251 elif fpa == fp2:
252 252 fp2 = nullid
253 253
254 254 # is the file unmodified from the parent?
255 255 if t == r.read(fp1):
256 256 # record the proper existing parent in manifest
257 257 # no need to add a revision
258 258 mm[f] = fp1
259 259 continue
260 260
261 261 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
262 262 changed.append(f)
263 263 if update_dirstate:
264 264 self.dirstate.update([f], "n")
265 265 except IOError:
266 266 try:
267 267 del mm[f]
268 268 del mfm[f]
269 269 if update_dirstate:
270 270 self.dirstate.forget([f])
271 271 except:
272 272 # deleted from p2?
273 273 pass
274 274
275 275 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
276 276 user = user or self.ui.username()
277 277 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
278 278 tr.close()
279 279 if update_dirstate:
280 280 self.dirstate.setparents(n, nullid)
281 281
282 282 def commit(self, files = None, text = "", user = None, date = None,
283 283 match = util.always, force=False):
284 284 commit = []
285 285 remove = []
286 286 changed = []
287 287
288 288 if files:
289 289 for f in files:
290 290 s = self.dirstate.state(f)
291 291 if s in 'nmai':
292 292 commit.append(f)
293 293 elif s == 'r':
294 294 remove.append(f)
295 295 else:
296 296 self.ui.warn("%s not tracked!\n" % f)
297 297 else:
298 298 (c, a, d, u) = self.changes(match=match)
299 299 commit = c + a
300 300 remove = d
301 301
302 302 p1, p2 = self.dirstate.parents()
303 303 c1 = self.changelog.read(p1)
304 304 c2 = self.changelog.read(p2)
305 305 m1 = self.manifest.read(c1[0])
306 306 mf1 = self.manifest.readflags(c1[0])
307 307 m2 = self.manifest.read(c2[0])
308 308
309 309 if not commit and not remove and not force and p2 == nullid:
310 310 self.ui.status("nothing changed\n")
311 311 return None
312 312
313 313 if not self.hook("precommit"):
314 314 return None
315 315
316 316 lock = self.lock()
317 317 tr = self.transaction()
318 318
319 319 # check in files
320 320 new = {}
321 321 linkrev = self.changelog.count()
322 322 commit.sort()
323 323 for f in commit:
324 324 self.ui.note(f + "\n")
325 325 try:
326 326 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
327 327 t = self.wread(f)
328 328 except IOError:
329 329 self.ui.warn("trouble committing %s!\n" % f)
330 330 raise
331 331
332 r = self.file(f)
333
332 334 meta = {}
333 335 cp = self.dirstate.copied(f)
334 336 if cp:
335 337 meta["copy"] = cp
336 338 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
337 339 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
338
339 r = self.file(f)
340 fp1 = m1.get(f, nullid)
341 fp2 = m2.get(f, nullid)
340 fp1, fp2 = nullid, nullid
341 else:
342 fp1 = m1.get(f, nullid)
343 fp2 = m2.get(f, nullid)
342 344
343 345 # is the same revision on two branches of a merge?
344 346 if fp2 == fp1:
345 347 fp2 = nullid
346 348
347 349 if fp2 != nullid:
348 350 # is one parent an ancestor of the other?
349 351 fpa = r.ancestor(fp1, fp2)
350 352 if fpa == fp1:
351 353 fp1, fp2 = fp2, nullid
352 354 elif fpa == fp2:
353 355 fp2 = nullid
354 356
355 357 # is the file unmodified from the parent?
356 358 if not meta and t == r.read(fp1):
357 359 # record the proper existing parent in manifest
358 360 # no need to add a revision
359 361 new[f] = fp1
360 362 continue
361 363
362 364 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
363 365 # remember what we've added so that we can later calculate
364 366 # the files to pull from a set of changesets
365 367 changed.append(f)
366 368
367 369 # update manifest
368 370 m1.update(new)
369 371 for f in remove:
370 372 if f in m1:
371 373 del m1[f]
372 374 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
373 375 (new, remove))
374 376
375 377 # add changeset
376 378 new = new.keys()
377 379 new.sort()
378 380
379 381 if not text:
380 382 edittext = ""
381 383 if p2 != nullid:
382 384 edittext += "HG: branch merge\n"
383 385 edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
384 386 edittext += "".join(["HG: changed %s\n" % f for f in changed])
385 387 edittext += "".join(["HG: removed %s\n" % f for f in remove])
386 388 if not changed and not remove:
387 389 edittext += "HG: no files changed\n"
388 390 edittext = self.ui.edit(edittext)
389 391 if not edittext.rstrip():
390 392 return None
391 393 text = edittext
392 394
393 395 user = user or self.ui.username()
394 396 n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
395 397 tr.close()
396 398
397 399 self.dirstate.setparents(n)
398 400 self.dirstate.update(new, "n")
399 401 self.dirstate.forget(remove)
400 402
401 403 if not self.hook("commit", node=hex(n)):
402 404 return None
403 405 return n
404 406
405 407 def walk(self, node=None, files=[], match=util.always):
406 408 if node:
407 409 for fn in self.manifest.read(self.changelog.read(node)[0]):
408 410 if match(fn): yield 'm', fn
409 411 else:
410 412 for src, fn in self.dirstate.walk(files, match):
411 413 yield src, fn
412 414
413 415 def changes(self, node1 = None, node2 = None, files = [],
414 416 match = util.always):
415 417 mf2, u = None, []
416 418
417 419 def fcmp(fn, mf):
418 420 t1 = self.wread(fn)
419 421 t2 = self.file(fn).read(mf.get(fn, nullid))
420 422 return cmp(t1, t2)
421 423
422 424 def mfmatches(node):
423 425 mf = dict(self.manifest.read(node))
424 426 for fn in mf.keys():
425 427 if not match(fn):
426 428 del mf[fn]
427 429 return mf
428 430
429 431 # are we comparing the working directory?
430 432 if not node2:
431 433 l, c, a, d, u = self.dirstate.changes(files, match)
432 434
433 435 # are we comparing working dir against its parent?
434 436 if not node1:
435 437 if l:
436 438 # do a full compare of any files that might have changed
437 439 change = self.changelog.read(self.dirstate.parents()[0])
438 440 mf2 = mfmatches(change[0])
439 441 for f in l:
440 442 if fcmp(f, mf2):
441 443 c.append(f)
442 444
443 445 for l in c, a, d, u:
444 446 l.sort()
445 447
446 448 return (c, a, d, u)
447 449
448 450 # are we comparing working dir against non-tip?
449 451 # generate a pseudo-manifest for the working dir
450 452 if not node2:
451 453 if not mf2:
452 454 change = self.changelog.read(self.dirstate.parents()[0])
453 455 mf2 = mfmatches(change[0])
454 456 for f in a + c + l:
455 457 mf2[f] = ""
456 458 for f in d:
457 459 if f in mf2: del mf2[f]
458 460 else:
459 461 change = self.changelog.read(node2)
460 462 mf2 = mfmatches(change[0])
461 463
462 464 # flush lists from dirstate before comparing manifests
463 465 c, a = [], []
464 466
465 467 change = self.changelog.read(node1)
466 468 mf1 = mfmatches(change[0])
467 469
468 470 for fn in mf2:
469 471 if mf1.has_key(fn):
470 472 if mf1[fn] != mf2[fn]:
471 473 if mf2[fn] != "" or fcmp(fn, mf1):
472 474 c.append(fn)
473 475 del mf1[fn]
474 476 else:
475 477 a.append(fn)
476 478
477 479 d = mf1.keys()
478 480
479 481 for l in c, a, d, u:
480 482 l.sort()
481 483
482 484 return (c, a, d, u)
483 485
484 486 def add(self, list):
485 487 for f in list:
486 488 p = self.wjoin(f)
487 489 if not os.path.exists(p):
488 490 self.ui.warn("%s does not exist!\n" % f)
489 491 elif not os.path.isfile(p):
490 492 self.ui.warn("%s not added: only files supported currently\n" % f)
491 493 elif self.dirstate.state(f) in 'an':
492 494 self.ui.warn("%s already tracked!\n" % f)
493 495 else:
494 496 self.dirstate.update([f], "a")
495 497
496 498 def forget(self, list):
497 499 for f in list:
498 500 if self.dirstate.state(f) not in 'ai':
499 501 self.ui.warn("%s not added!\n" % f)
500 502 else:
501 503 self.dirstate.forget([f])
502 504
503 505 def remove(self, list):
504 506 for f in list:
505 507 p = self.wjoin(f)
506 508 if os.path.exists(p):
507 509 self.ui.warn("%s still exists!\n" % f)
508 510 elif self.dirstate.state(f) == 'a':
509 511 self.ui.warn("%s never committed!\n" % f)
510 512 self.dirstate.forget([f])
511 513 elif f not in self.dirstate:
512 514 self.ui.warn("%s not tracked!\n" % f)
513 515 else:
514 516 self.dirstate.update([f], "r")
515 517
516 518 def copy(self, source, dest):
517 519 p = self.wjoin(dest)
518 520 if not os.path.exists(p):
519 521 self.ui.warn("%s does not exist!\n" % dest)
520 522 elif not os.path.isfile(p):
521 523 self.ui.warn("copy failed: %s is not a file\n" % dest)
522 524 else:
523 525 if self.dirstate.state(dest) == '?':
524 526 self.dirstate.update([dest], "a")
525 527 self.dirstate.copy(source, dest)
526 528
527 529 def heads(self):
528 530 return self.changelog.heads()
529 531
530 532 # branchlookup returns a dict giving a list of branches for
531 533 # each head. A branch is defined as the tag of a node or
532 534 # the branch of the node's parents. If a node has multiple
533 535 # branch tags, tags are eliminated if they are visible from other
534 536 # branch tags.
535 537 #
536 538 # So, for this graph: a->b->c->d->e
537 539         #                      \         /
538 540         #                       aa -----/
539 541 # a has tag 2.6.12
540 542 # d has tag 2.6.13
541 543 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
542 544 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
543 545 # from the list.
544 546 #
545 547 # It is possible that more than one head will have the same branch tag.
546 548 # callers need to check the result for multiple heads under the same
547 549 # branch tag if that is a problem for them (ie checkout of a specific
548 550 # branch).
549 551 #
550 552 # passing in a specific branch will limit the depth of the search
551 553 # through the parents. It won't limit the branches returned in the
552 554 # result though.
553 555 def branchlookup(self, heads=None, branch=None):
554 556 if not heads:
555 557 heads = self.heads()
556 558 headt = [ h for h in heads ]
557 559 chlog = self.changelog
558 560 branches = {}
559 561 merges = []
560 562 seenmerge = {}
561 563
562 564 # traverse the tree once for each head, recording in the branches
563 565 # dict which tags are visible from this head. The branches
564 566 # dict also records which tags are visible from each tag
565 567 # while we traverse.
566 568 while headt or merges:
567 569 if merges:
568 570 n, found = merges.pop()
569 571 visit = [n]
570 572 else:
571 573 h = headt.pop()
572 574 visit = [h]
573 575 found = [h]
574 576 seen = {}
575 577 while visit:
576 578 n = visit.pop()
577 579 if n in seen:
578 580 continue
579 581 pp = chlog.parents(n)
580 582 tags = self.nodetags(n)
581 583 if tags:
582 584 for x in tags:
583 585 if x == 'tip':
584 586 continue
585 587 for f in found:
586 588 branches.setdefault(f, {})[n] = 1
587 589 branches.setdefault(n, {})[n] = 1
588 590 break
589 591 if n not in found:
590 592 found.append(n)
591 593 if branch in tags:
592 594 continue
593 595 seen[n] = 1
594 596 if pp[1] != nullid and n not in seenmerge:
595 597 merges.append((pp[1], [x for x in found]))
596 598 seenmerge[n] = 1
597 599 if pp[0] != nullid:
598 600 visit.append(pp[0])
599 601 # traverse the branches dict, eliminating branch tags from each
600 602 # head that are visible from another branch tag for that head.
601 603 out = {}
602 604 viscache = {}
603 605 for h in heads:
604 606 def visible(node):
605 607 if node in viscache:
606 608 return viscache[node]
607 609 ret = {}
608 610 visit = [node]
609 611 while visit:
610 612 x = visit.pop()
611 613 if x in viscache:
612 614 ret.update(viscache[x])
613 615 elif x not in ret:
614 616 ret[x] = 1
615 617 if x in branches:
616 618 visit[len(visit):] = branches[x].keys()
617 619 viscache[node] = ret
618 620 return ret
619 621 if h not in branches:
620 622 continue
621 623 # O(n^2), but somewhat limited. This only searches the
622 624 # tags visible from a specific head, not all the tags in the
623 625 # whole repo.
624 626 for b in branches[h]:
625 627 vis = False
626 628 for bb in branches[h].keys():
627 629 if b != bb:
628 630 if b in visible(bb):
629 631 vis = True
630 632 break
631 633 if not vis:
632 634 l = out.setdefault(h, [])
633 635 l[len(l):] = self.nodetags(b)
634 636 return out
635 637
636 638 def branches(self, nodes):
637 639 if not nodes: nodes = [self.changelog.tip()]
638 640 b = []
639 641 for n in nodes:
640 642 t = n
641 643 while n:
642 644 p = self.changelog.parents(n)
643 645 if p[1] != nullid or p[0] == nullid:
644 646 b.append((t, n, p[0], p[1]))
645 647 break
646 648 n = p[0]
647 649 return b
648 650
649 651 def between(self, pairs):
650 652 r = []
651 653
652 654 for top, bottom in pairs:
653 655 n, l, i = top, [], 0
654 656 f = 1
655 657
656 658 while n != bottom:
657 659 p = self.changelog.parents(n)[0]
658 660 if i == f:
659 661 l.append(n)
660 662 f = f * 2
661 663 n = p
662 664 i += 1
663 665
664 666 r.append(l)
665 667
666 668 return r
667 669
668 670 def newer(self, nodes):
669 671 m = {}
670 672 nl = []
671 673 pm = {}
672 674 cl = self.changelog
673 675 t = l = cl.count()
674 676
675 677 # find the lowest numbered node
676 678 for n in nodes:
677 679 l = min(l, cl.rev(n))
678 680 m[n] = 1
679 681
680 682 for i in xrange(l, t):
681 683 n = cl.node(i)
682 684 if n in m: # explicitly listed
683 685 pm[n] = 1
684 686 nl.append(n)
685 687 continue
686 688 for p in cl.parents(n):
687 689 if p in pm: # parent listed
688 690 pm[n] = 1
689 691 nl.append(n)
690 692 break
691 693
692 694 return nl
693 695
694 696 def findincoming(self, remote, base=None, heads=None):
695 697 m = self.changelog.nodemap
696 698 search = []
697 699 fetch = {}
698 700 seen = {}
699 701 seenbranch = {}
700 702 if base == None:
701 703 base = {}
702 704
703 705 # assume we're closer to the tip than the root
704 706 # and start by examining the heads
705 707 self.ui.status("searching for changes\n")
706 708
707 709 if not heads:
708 710 heads = remote.heads()
709 711
710 712 unknown = []
711 713 for h in heads:
712 714 if h not in m:
713 715 unknown.append(h)
714 716 else:
715 717 base[h] = 1
716 718
717 719 if not unknown:
718 720 return None
719 721
720 722 rep = {}
721 723 reqcnt = 0
722 724
723 725 # search through remote branches
724 726 # a 'branch' here is a linear segment of history, with four parts:
725 727 # head, root, first parent, second parent
726 728 # (a branch always has two parents (or none) by definition)
727 729 unknown = remote.branches(unknown)
728 730 while unknown:
729 731 r = []
730 732 while unknown:
731 733 n = unknown.pop(0)
732 734 if n[0] in seen:
733 735 continue
734 736
735 737 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
736 738 if n[0] == nullid:
737 739 break
738 740 if n in seenbranch:
739 741 self.ui.debug("branch already found\n")
740 742 continue
741 743 if n[1] and n[1] in m: # do we know the base?
742 744 self.ui.debug("found incomplete branch %s:%s\n"
743 745 % (short(n[0]), short(n[1])))
744 746 search.append(n) # schedule branch range for scanning
745 747 seenbranch[n] = 1
746 748 else:
747 749 if n[1] not in seen and n[1] not in fetch:
748 750 if n[2] in m and n[3] in m:
749 751 self.ui.debug("found new changeset %s\n" %
750 752 short(n[1]))
751 753 fetch[n[1]] = 1 # earliest unknown
752 754 base[n[2]] = 1 # latest known
753 755 continue
754 756
755 757 for a in n[2:4]:
756 758 if a not in rep:
757 759 r.append(a)
758 760 rep[a] = 1
759 761
760 762 seen[n[0]] = 1
761 763
762 764 if r:
763 765 reqcnt += 1
764 766 self.ui.debug("request %d: %s\n" %
765 767 (reqcnt, " ".join(map(short, r))))
766 768 for p in range(0, len(r), 10):
767 769 for b in remote.branches(r[p:p+10]):
768 770 self.ui.debug("received %s:%s\n" %
769 771 (short(b[0]), short(b[1])))
770 772 if b[0] in m:
771 773 self.ui.debug("found base node %s\n" % short(b[0]))
772 774 base[b[0]] = 1
773 775 elif b[0] not in seen:
774 776 unknown.append(b)
775 777
776 778 # do binary search on the branches we found
777 779 while search:
778 780 n = search.pop(0)
779 781 reqcnt += 1
780 782 l = remote.between([(n[0], n[1])])[0]
781 783 l.append(n[1])
782 784 p = n[0]
783 785 f = 1
784 786 for i in l:
785 787 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
786 788 if i in m:
787 789 if f <= 2:
788 790 self.ui.debug("found new branch changeset %s\n" %
789 791 short(p))
790 792 fetch[p] = 1
791 793 base[i] = 1
792 794 else:
793 795 self.ui.debug("narrowed branch search to %s:%s\n"
794 796 % (short(p), short(i)))
795 797 search.append((p, i))
796 798 break
797 799 p, f = i, f * 2
798 800
799 801 # sanity check our fetch list
800 802 for f in fetch.keys():
801 803 if f in m:
802 804 raise repo.RepoError("already have changeset " + short(f[:4]))
803 805
804 806 if base.keys() == [nullid]:
805 807 self.ui.warn("warning: pulling from an unrelated repository!\n")
806 808
807 809 self.ui.note("found new changesets starting at " +
808 810 " ".join([short(f) for f in fetch]) + "\n")
809 811
810 812 self.ui.debug("%d total queries\n" % reqcnt)
811 813
812 814 return fetch.keys()
813 815
814 816 def findoutgoing(self, remote, base=None, heads=None):
815 817 if base == None:
816 818 base = {}
817 819 self.findincoming(remote, base, heads)
818 820
819 821 self.ui.debug("common changesets up to "
820 822 + " ".join(map(short, base.keys())) + "\n")
821 823
822 824 remain = dict.fromkeys(self.changelog.nodemap)
823 825
824 826 # prune everything remote has from the tree
825 827 del remain[nullid]
826 828 remove = base.keys()
827 829 while remove:
828 830 n = remove.pop(0)
829 831 if n in remain:
830 832 del remain[n]
831 833 for p in self.changelog.parents(n):
832 834 remove.append(p)
833 835
834 836 # find every node whose parents have been pruned
835 837 subset = []
836 838 for n in remain:
837 839 p1, p2 = self.changelog.parents(n)
838 840 if p1 not in remain and p2 not in remain:
839 841 subset.append(n)
840 842
841 843 # this is the set of all roots we have to push
842 844 return subset
843 845
844 846 def pull(self, remote):
845 847 lock = self.lock()
846 848
847 849 # if we have an empty repo, fetch everything
848 850 if self.changelog.tip() == nullid:
849 851 self.ui.status("requesting all changes\n")
850 852 fetch = [nullid]
851 853 else:
852 854 fetch = self.findincoming(remote)
853 855
854 856 if not fetch:
855 857 self.ui.status("no changes found\n")
856 858 return 1
857 859
858 860 cg = remote.changegroup(fetch)
859 861 return self.addchangegroup(cg)
860 862
861 863 def push(self, remote, force=False):
862 864 lock = remote.lock()
863 865
864 866 base = {}
865 867 heads = remote.heads()
866 868 inc = self.findincoming(remote, base, heads)
867 869 if not force and inc:
868 870 self.ui.warn("abort: unsynced remote changes!\n")
869 871 self.ui.status("(did you forget to sync? use push -f to force)\n")
870 872 return 1
871 873
872 874 update = self.findoutgoing(remote, base)
873 875 if not update:
874 876 self.ui.status("no changes found\n")
875 877 return 1
876 878 elif not force:
877 879 if len(heads) < len(self.changelog.heads()):
878 880 self.ui.warn("abort: push creates new remote branches!\n")
879 881 self.ui.status("(did you forget to merge?" +
880 882 " use push -f to force)\n")
881 883 return 1
882 884
883 885 cg = self.changegroup(update)
884 886 return remote.addchangegroup(cg)
885 887
886 888 def changegroup(self, basenodes):
887 889 class genread:
888 890 def __init__(self, generator):
889 891 self.g = generator
890 892 self.buf = ""
891 893 def fillbuf(self):
892 894 self.buf += "".join(self.g)
893 895
894 896 def read(self, l):
895 897 while l > len(self.buf):
896 898 try:
897 899 self.buf += self.g.next()
898 900 except StopIteration:
899 901 break
900 902 d, self.buf = self.buf[:l], self.buf[l:]
901 903 return d
902 904
903 905 def gengroup():
904 906 nodes = self.newer(basenodes)
905 907
906 908 # construct the link map
907 909 linkmap = {}
908 910 for n in nodes:
909 911 linkmap[self.changelog.rev(n)] = n
910 912
911 913 # construct a list of all changed files
912 914 changed = {}
913 915 for n in nodes:
914 916 c = self.changelog.read(n)
915 917 for f in c[3]:
916 918 changed[f] = 1
917 919 changed = changed.keys()
918 920 changed.sort()
919 921
920 922 # the changegroup is changesets + manifests + all file revs
921 923 revs = [ self.changelog.rev(n) for n in nodes ]
922 924
923 925 for y in self.changelog.group(linkmap): yield y
924 926 for y in self.manifest.group(linkmap): yield y
925 927 for f in changed:
926 928 yield struct.pack(">l", len(f) + 4) + f
927 929 g = self.file(f).group(linkmap)
928 930 for y in g:
929 931 yield y
930 932
931 933 yield struct.pack(">l", 0)
932 934
933 935 return genread(gengroup())
934 936
935 937 def addchangegroup(self, source):
936 938
937 939 def getchunk():
938 940 d = source.read(4)
939 941 if not d: return ""
940 942 l = struct.unpack(">l", d)[0]
941 943 if l <= 4: return ""
942 944 return source.read(l - 4)
943 945
944 946 def getgroup():
945 947 while 1:
946 948 c = getchunk()
947 949 if not c: break
948 950 yield c
949 951
950 952 def csmap(x):
951 953 self.ui.debug("add changeset %s\n" % short(x))
952 954 return self.changelog.count()
953 955
954 956 def revmap(x):
955 957 return self.changelog.rev(x)
956 958
957 959 if not source: return
958 960 changesets = files = revisions = 0
959 961
960 962 tr = self.transaction()
961 963
962 964 oldheads = len(self.changelog.heads())
963 965
964 966 # pull off the changeset group
965 967 self.ui.status("adding changesets\n")
966 968 co = self.changelog.tip()
967 969 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
968 970 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
969 971
970 972 # pull off the manifest group
971 973 self.ui.status("adding manifests\n")
972 974 mm = self.manifest.tip()
973 975 mo = self.manifest.addgroup(getgroup(), revmap, tr)
974 976
975 977 # process the files
976 978 self.ui.status("adding file changes\n")
977 979 while 1:
978 980 f = getchunk()
979 981 if not f: break
980 982 self.ui.debug("adding %s revisions\n" % f)
981 983 fl = self.file(f)
982 984 o = fl.count()
983 985 n = fl.addgroup(getgroup(), revmap, tr)
984 986 revisions += fl.count() - o
985 987 files += 1
986 988
987 989 newheads = len(self.changelog.heads())
988 990 heads = ""
989 991 if oldheads and newheads > oldheads:
990 992 heads = " (+%d heads)" % (newheads - oldheads)
991 993
992 994 self.ui.status(("added %d changesets" +
993 995 " with %d changes to %d files%s\n")
994 996 % (changesets, revisions, files, heads))
995 997
996 998 tr.close()
997 999
998 1000 if not self.hook("changegroup"):
999 1001 return 1
1000 1002
1001 1003 return
1002 1004
1003 1005 def update(self, node, allow=False, force=False, choose=None,
1004 1006 moddirstate=True):
1005 1007 pl = self.dirstate.parents()
1006 1008 if not force and pl[1] != nullid:
1007 1009 self.ui.warn("aborting: outstanding uncommitted merges\n")
1008 1010 return 1
1009 1011
1010 1012 p1, p2 = pl[0], node
1011 1013 pa = self.changelog.ancestor(p1, p2)
1012 1014 m1n = self.changelog.read(p1)[0]
1013 1015 m2n = self.changelog.read(p2)[0]
1014 1016 man = self.manifest.ancestor(m1n, m2n)
1015 1017 m1 = self.manifest.read(m1n)
1016 1018 mf1 = self.manifest.readflags(m1n)
1017 1019 m2 = self.manifest.read(m2n)
1018 1020 mf2 = self.manifest.readflags(m2n)
1019 1021 ma = self.manifest.read(man)
1020 1022 mfa = self.manifest.readflags(man)
1021 1023
1022 1024 (c, a, d, u) = self.changes()
1023 1025
1024 1026 # is this a jump, or a merge? i.e. is there a linear path
1025 1027 # from p1 to p2?
1026 1028 linear_path = (pa == p1 or pa == p2)
1027 1029
1028 1030 # resolve the manifest to determine which files
1029 1031 # we care about merging
1030 1032 self.ui.note("resolving manifests\n")
1031 1033 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1032 1034 (force, allow, moddirstate, linear_path))
1033 1035 self.ui.debug(" ancestor %s local %s remote %s\n" %
1034 1036 (short(man), short(m1n), short(m2n)))
1035 1037
1036 1038 merge = {}
1037 1039 get = {}
1038 1040 remove = []
1039 1041
1040 1042 # construct a working dir manifest
1041 1043 mw = m1.copy()
1042 1044 mfw = mf1.copy()
1043 1045 umap = dict.fromkeys(u)
1044 1046
1045 1047 for f in a + c + u:
1046 1048 mw[f] = ""
1047 1049 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1048 1050
1049 1051 for f in d:
1050 1052 if f in mw: del mw[f]
1051 1053
1052 1054 # If we're jumping between revisions (as opposed to merging),
1053 1055 # and if neither the working directory nor the target rev has
1054 1056 # the file, then we need to remove it from the dirstate, to
1055 1057 # prevent the dirstate from listing the file when it is no
1056 1058 # longer in the manifest.
1057 1059 if moddirstate and linear_path and f not in m2:
1058 1060 self.dirstate.forget((f,))
1059 1061
1060 1062 # Compare manifests
1061 1063 for f, n in mw.iteritems():
1062 1064 if choose and not choose(f): continue
1063 1065 if f in m2:
1064 1066 s = 0
1065 1067
1066 1068 # is the wfile new since m1, and match m2?
1067 1069 if f not in m1:
1068 1070 t1 = self.wread(f)
1069 1071 t2 = self.file(f).read(m2[f])
1070 1072 if cmp(t1, t2) == 0:
1071 1073 n = m2[f]
1072 1074 del t1, t2
1073 1075
1074 1076 # are files different?
1075 1077 if n != m2[f]:
1076 1078 a = ma.get(f, nullid)
1077 1079 # are both different from the ancestor?
1078 1080 if n != a and m2[f] != a:
1079 1081 self.ui.debug(" %s versions differ, resolve\n" % f)
1080 1082 # merge executable bits
1081 1083 # "if we changed or they changed, change in merge"
1082 1084 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1083 1085 mode = ((a^b) | (a^c)) ^ a
1084 1086 merge[f] = (m1.get(f, nullid), m2[f], mode)
1085 1087 s = 1
1086 1088 # are we clobbering?
1087 1089 # is remote's version newer?
1088 1090 # or are we going back in time?
1089 1091 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1090 1092 self.ui.debug(" remote %s is newer, get\n" % f)
1091 1093 get[f] = m2[f]
1092 1094 s = 1
1093 1095 elif f in umap:
1094 1096 # this unknown file is the same as the checkout
1095 1097 get[f] = m2[f]
1096 1098
1097 1099 if not s and mfw[f] != mf2[f]:
1098 1100 if force:
1099 1101 self.ui.debug(" updating permissions for %s\n" % f)
1100 1102 util.set_exec(self.wjoin(f), mf2[f])
1101 1103 else:
1102 1104 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1103 1105 mode = ((a^b) | (a^c)) ^ a
1104 1106 if mode != b:
1105 1107 self.ui.debug(" updating permissions for %s\n" % f)
1106 1108 util.set_exec(self.wjoin(f), mode)
1107 1109 del m2[f]
1108 1110 elif f in ma:
1109 1111 if n != ma[f]:
1110 1112 r = "d"
1111 1113 if not force and (linear_path or allow):
1112 1114 r = self.ui.prompt(
1113 1115 (" local changed %s which remote deleted\n" % f) +
1114 1116 "(k)eep or (d)elete?", "[kd]", "k")
1115 1117 if r == "d":
1116 1118 remove.append(f)
1117 1119 else:
1118 1120 self.ui.debug("other deleted %s\n" % f)
1119 1121 remove.append(f) # other deleted it
1120 1122 else:
1121 1123 if n == m1.get(f, nullid): # same as parent
1122 1124 if p2 == pa: # going backwards?
1123 1125 self.ui.debug("remote deleted %s\n" % f)
1124 1126 remove.append(f)
1125 1127 else:
1126 1128 self.ui.debug("local created %s, keeping\n" % f)
1127 1129 else:
1128 1130 self.ui.debug("working dir created %s, keeping\n" % f)
1129 1131
1130 1132 for f, n in m2.iteritems():
1131 1133 if choose and not choose(f): continue
1132 1134 if f[0] == "/": continue
1133 1135 if f in ma and n != ma[f]:
1134 1136 r = "k"
1135 1137 if not force and (linear_path or allow):
1136 1138 r = self.ui.prompt(
1137 1139 ("remote changed %s which local deleted\n" % f) +
1138 1140 "(k)eep or (d)elete?", "[kd]", "k")
1139 1141 if r == "k": get[f] = n
1140 1142 elif f not in ma:
1141 1143 self.ui.debug("remote created %s\n" % f)
1142 1144 get[f] = n
1143 1145 else:
1144 1146 if force or p2 == pa: # going backwards?
1145 1147 self.ui.debug("local deleted %s, recreating\n" % f)
1146 1148 get[f] = n
1147 1149 else:
1148 1150 self.ui.debug("local deleted %s\n" % f)
1149 1151
1150 1152 del mw, m1, m2, ma
1151 1153
1152 1154 if force:
1153 1155 for f in merge:
1154 1156 get[f] = merge[f][1]
1155 1157 merge = {}
1156 1158
1157 1159 if linear_path or force:
1158 1160 # we don't need to do any magic, just jump to the new rev
1159 1161 branch_merge = False
1160 1162 p1, p2 = p2, nullid
1161 1163 else:
1162 1164 if not allow:
1163 1165 self.ui.status("this update spans a branch" +
1164 1166 " affecting the following files:\n")
1165 1167 fl = merge.keys() + get.keys()
1166 1168 fl.sort()
1167 1169 for f in fl:
1168 1170 cf = ""
1169 1171 if f in merge: cf = " (resolve)"
1170 1172 self.ui.status(" %s%s\n" % (f, cf))
1171 1173 self.ui.warn("aborting update spanning branches!\n")
1172 1174 self.ui.status("(use update -m to merge across branches" +
1173 1175 " or -C to lose changes)\n")
1174 1176 return 1
1175 1177 branch_merge = True
1176 1178
1177 1179 if moddirstate:
1178 1180 self.dirstate.setparents(p1, p2)
1179 1181
1180 1182 # get the files we don't need to change
1181 1183 files = get.keys()
1182 1184 files.sort()
1183 1185 for f in files:
1184 1186 if f[0] == "/": continue
1185 1187 self.ui.note("getting %s\n" % f)
1186 1188 t = self.file(f).read(get[f])
1187 1189 try:
1188 1190 self.wwrite(f, t)
1189 1191 except IOError:
1190 1192 os.makedirs(os.path.dirname(self.wjoin(f)))
1191 1193 self.wwrite(f, t)
1192 1194 util.set_exec(self.wjoin(f), mf2[f])
1193 1195 if moddirstate:
1194 1196 if branch_merge:
1195 1197 self.dirstate.update([f], 'n', st_mtime=-1)
1196 1198 else:
1197 1199 self.dirstate.update([f], 'n')
1198 1200
1199 1201 # merge the tricky bits
1200 1202 files = merge.keys()
1201 1203 files.sort()
1202 1204 for f in files:
1203 1205 self.ui.status("merging %s\n" % f)
1204 1206 my, other, flag = merge[f]
1205 1207 self.merge3(f, my, other)
1206 1208 util.set_exec(self.wjoin(f), flag)
1207 1209 if moddirstate:
1208 1210 if branch_merge:
1209 1211 # We've done a branch merge, mark this file as merged
1210 1212 # so that we properly record the merger later
1211 1213 self.dirstate.update([f], 'm')
1212 1214 else:
1213 1215 # We've update-merged a locally modified file, so
1214 1216 # we set the dirstate to emulate a normal checkout
1215 1217 # of that file some time in the past. Thus our
1216 1218 # merge will appear as a normal local file
1217 1219 # modification.
1218 1220 f_len = len(self.file(f).read(other))
1219 1221 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1220 1222
1221 1223 remove.sort()
1222 1224 for f in remove:
1223 1225 self.ui.note("removing %s\n" % f)
1224 1226 try:
1225 1227 os.unlink(self.wjoin(f))
1226 1228 except OSError, inst:
1227 1229 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1228 1230 # try removing directories that might now be empty
1229 1231 try: os.removedirs(os.path.dirname(self.wjoin(f)))
1230 1232 except: pass
1231 1233 if moddirstate:
1232 1234 if branch_merge:
1233 1235 self.dirstate.update(remove, 'r')
1234 1236 else:
1235 1237 self.dirstate.forget(remove)
1236 1238
1237 1239 def merge3(self, fn, my, other):
1238 1240 """perform a 3-way merge in the working directory"""
1239 1241
1240 1242 def temp(prefix, node):
1241 1243 pre = "%s~%s." % (os.path.basename(fn), prefix)
1242 1244 (fd, name) = tempfile.mkstemp("", pre)
1243 1245 f = os.fdopen(fd, "wb")
1244 1246 self.wwrite(fn, fl.read(node), f)
1245 1247 f.close()
1246 1248 return name
1247 1249
1248 1250 fl = self.file(fn)
1249 1251 base = fl.ancestor(my, other)
1250 1252 a = self.wjoin(fn)
1251 1253 b = temp("base", base)
1252 1254 c = temp("other", other)
1253 1255
1254 1256 self.ui.note("resolving %s\n" % fn)
1255 1257 self.ui.debug("file %s: other %s ancestor %s\n" %
1256 1258 (fn, short(other), short(base)))
1257 1259
1258 1260 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1259 1261 or "hgmerge")
1260 1262 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1261 1263 if r:
1262 1264 self.ui.warn("merging %s failed!\n" % fn)
1263 1265
1264 1266 os.unlink(b)
1265 1267 os.unlink(c)
1266 1268
1267 1269 def verify(self):
1268 1270 filelinkrevs = {}
1269 1271 filenodes = {}
1270 1272 changesets = revisions = files = 0
1271 1273 errors = 0
1272 1274
1273 1275 seen = {}
1274 1276 self.ui.status("checking changesets\n")
1275 1277 for i in range(self.changelog.count()):
1276 1278 changesets += 1
1277 1279 n = self.changelog.node(i)
1278 1280 if n in seen:
1279 1281 self.ui.warn("duplicate changeset at revision %d\n" % i)
1280 1282 errors += 1
1281 1283 seen[n] = 1
1282 1284
1283 1285 for p in self.changelog.parents(n):
1284 1286 if p not in self.changelog.nodemap:
1285 1287 self.ui.warn("changeset %s has unknown parent %s\n" %
1286 1288 (short(n), short(p)))
1287 1289 errors += 1
1288 1290 try:
1289 1291 changes = self.changelog.read(n)
1290 1292 except Exception, inst:
1291 1293 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1292 1294 errors += 1
1293 1295
1294 1296 for f in changes[3]:
1295 1297 filelinkrevs.setdefault(f, []).append(i)
1296 1298
1297 1299 seen = {}
1298 1300 self.ui.status("checking manifests\n")
1299 1301 for i in range(self.manifest.count()):
1300 1302 n = self.manifest.node(i)
1301 1303 if n in seen:
1302 1304 self.ui.warn("duplicate manifest at revision %d\n" % i)
1303 1305 errors += 1
1304 1306 seen[n] = 1
1305 1307
1306 1308 for p in self.manifest.parents(n):
1307 1309 if p not in self.manifest.nodemap:
1308 1310 self.ui.warn("manifest %s has unknown parent %s\n" %
1309 1311 (short(n), short(p)))
1310 1312 errors += 1
1311 1313
1312 1314 try:
1313 1315 delta = mdiff.patchtext(self.manifest.delta(n))
1314 1316 except KeyboardInterrupt:
1315 1317 self.ui.warn("interrupted")
1316 1318 raise
1317 1319 except Exception, inst:
1318 1320 self.ui.warn("unpacking manifest %s: %s\n"
1319 1321 % (short(n), inst))
1320 1322 errors += 1
1321 1323
1322 1324 ff = [ l.split('\0') for l in delta.splitlines() ]
1323 1325 for f, fn in ff:
1324 1326 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1325 1327
1326 1328 self.ui.status("crosschecking files in changesets and manifests\n")
1327 1329 for f in filenodes:
1328 1330 if f not in filelinkrevs:
1329 1331 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1330 1332 errors += 1
1331 1333
1332 1334 for f in filelinkrevs:
1333 1335 if f not in filenodes:
1334 1336 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1335 1337 errors += 1
1336 1338
1337 1339 self.ui.status("checking files\n")
1338 1340 ff = filenodes.keys()
1339 1341 ff.sort()
1340 1342 for f in ff:
1341 1343 if f == "/dev/null": continue
1342 1344 files += 1
1343 1345 fl = self.file(f)
1344 1346 nodes = { nullid: 1 }
1345 1347 seen = {}
1346 1348 for i in range(fl.count()):
1347 1349 revisions += 1
1348 1350 n = fl.node(i)
1349 1351
1350 1352 if n in seen:
1351 1353 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1352 1354 errors += 1
1353 1355
1354 1356 if n not in filenodes[f]:
1355 1357 self.ui.warn("%s: %d:%s not in manifests\n"
1356 1358 % (f, i, short(n)))
1357 1359 errors += 1
1358 1360 else:
1359 1361 del filenodes[f][n]
1360 1362
1361 1363 flr = fl.linkrev(n)
1362 1364 if flr not in filelinkrevs[f]:
1363 1365 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1364 1366 % (f, short(n), fl.linkrev(n)))
1365 1367 errors += 1
1366 1368 else:
1367 1369 filelinkrevs[f].remove(flr)
1368 1370
1369 1371 # verify contents
1370 1372 try:
1371 1373 t = fl.read(n)
1372 1374 except Exception, inst:
1373 1375 self.ui.warn("unpacking file %s %s: %s\n"
1374 1376 % (f, short(n), inst))
1375 1377 errors += 1
1376 1378
1377 1379 # verify parents
1378 1380 (p1, p2) = fl.parents(n)
1379 1381 if p1 not in nodes:
1380 1382 self.ui.warn("file %s:%s unknown parent 1 %s" %
1381 1383 (f, short(n), short(p1)))
1382 1384 errors += 1
1383 1385 if p2 not in nodes:
1384 1386 self.ui.warn("file %s:%s unknown parent 2 %s" %
1385 1387                              (f, short(n), short(p2)))
1386 1388 errors += 1
1387 1389 nodes[n] = 1
1388 1390
1389 1391 # cross-check
1390 1392 for node in filenodes[f]:
1391 1393 self.ui.warn("node %s in manifests not in %s\n"
1392 1394 % (hex(node), f))
1393 1395 errors += 1
1394 1396
1395 1397 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1396 1398 (files, changesets, revisions))
1397 1399
1398 1400 if errors:
1399 1401 self.ui.warn("%d integrity errors encountered!\n" % errors)
1400 1402 return 1
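Taken together with the test at the top, the visible effect of the commit() change is that a copied file's new filelog revision gets null parents while the copy source survives in its metadata, which is exactly what the "should show no parents for tip" debugindex lines and the debugrename output check. A hypothetical in-process check of the same behaviour (repo is an assumed localrepository handle; nullid as imported from node above):

    fl = repo.file("bar")                  # filelog of the copy destination
    n = fl.tip()
    p1, p2 = fl.parents(n)
    assert p1 == nullid and p2 == nullid   # copy starts a fresh filelog lineage
    assert fl.renamed(n)[0] == "foo"       # source recovered from the copy metadata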