##// END OF EJS Templates
Diff in subdirectories from Jake Edge...
mpm@selenic.com -
r64:b3e2ddff default
parent child Browse files
Show More
@@ -1,371 +1,380
1 1 #!/usr/bin/env python
2 2 #
3 3 # mercurial - a minimal scalable distributed SCM
4 4 # v0.4e "sabina"
5 5 #
6 6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
7 7 #
8 8 # This software may be used and distributed according to the terms
9 9 # of the GNU General Public License, incorporated herein by reference.
10 10
11 11 # the psyco compiler makes commits a bit faster
12 12 # and makes changegroup merge about 20 times slower!
13 13 # try:
14 14 # import psyco
15 15 # psyco.full()
16 16 # except:
17 17 # pass
18 18
19 19 import sys, os, time
20 20 from mercurial import hg, mdiff, fancyopts
21 21
22 22 def help():
23 23 print """\
24 24 commands:
25 25
26 26 init create a new repository in this directory
27 27 branch <path> create a branch of <path> in this directory
28 28 merge <path> merge changes from <path> into local repository
29 29 checkout [changeset] checkout the latest or given changeset
30 30 status show new, missing, and changed files in working dir
31 31 add [files...] add the given files in the next commit
32 32 remove [files...] remove the given files in the next commit
33 33 addremove add all new files, delete all missing files
34 34 commit commit all changes to the repository
35 35 history show changeset history
36 36 log <file> show revision history of a single file
37 37 dump <file> [rev] dump the latest or given revision of a file
38 38 dumpmanifest [rev] dump the latest or given revision of the manifest
39 39 diff [files...] diff working directory (or selected files)
40 40 """
41 41
def filterfiles(list, files):
    """Return the entries of `list` that are named in `files` or that
    live under a directory named in `files`."""
    kept = [entry for entry in list if entry in files]

    for prefix in files:
        # treat each filter entry as a directory prefix as well
        if prefix[-1] != os.sep:
            prefix += os.sep
        kept.extend(entry for entry in list if entry.startswith(prefix))
    return kept
49 49
def diff(files = None, node1 = None, node2 = None):
    """Write unified diffs to stdout for the changes between two
    changesets (or the working directory, when node2 is None).
    Uses the module-level `repo`."""
    def date(c):
        # changelog date field is "<timestamp> <tz>"; render as UTC asctime
        return time.asctime(time.gmtime(float(c[2].split(' ')[0])))

    if node2:
        # diff two committed revisions; "new" side reads from the store
        change = repo.changelog.read(node2)
        mmap2 = repo.manifest.read(change[0])
        (c, a, d) = repo.diffrevs(node1, node2)
        def read(f): return repo.file(f).read(mmap2[f])
        date2 = date(change)
    else:
        # diff against the working directory; "new" side reads from disk
        date2 = time.asctime()
        if not node1:
            node1 = repo.current
        (c, a, d) = repo.diffdir(repo.root, node1)
        def read(f): return file(os.path.join(repo.root, f)).read()

    change = repo.changelog.read(node1)
    mmap = repo.manifest.read(change[0])
    date1 = date(change)

    if files:
        (c, a, d) = map(lambda x: filterfiles(x, files), (c, a, d))

    # changed: old vs new, added: empty vs new, deleted: old vs empty
    for f in c:
        to = repo.file(f).read(mmap[f])
        tn = read(f)
        sys.stdout.write(mdiff.unidiff(to, date1, tn, date2, f))
    for f in a:
        tn = read(f)
        sys.stdout.write(mdiff.unidiff("", date1, tn, date2, f))
    for f in d:
        to = repo.file(f).read(mmap[f])
        sys.stdout.write(mdiff.unidiff(to, date1, "", date2, f))
82 86
83 87 options = {}
84 88 opts = [('v', 'verbose', None, 'verbose'),
85 89 ('d', 'debug', None, 'debug')]
86 90
87 91 args = fancyopts.fancyopts(sys.argv[1:], opts, options,
88 92 'hg [options] <command> [command options] [files]')
89 93
90 94 try:
91 95 cmd = args[0]
92 96 args = args[1:]
93 97 except:
94 98 cmd = ""
95 99
96 100 ui = hg.ui(options["verbose"], options["debug"])
97 101
98 102 if cmd == "init":
99 103 repo = hg.repository(ui, ".", create=1)
100 104 sys.exit(0)
101 105 elif cmd == "branch" or cmd == "clone":
102 106 os.system("cp -al %s/.hg .hg" % args[0])
103 107 sys.exit(0)
104 108 elif cmd == "help":
105 109 help()
106 110 sys.exit(0)
107 111 else:
108 112 try:
109 113 repo = hg.repository(ui=ui)
110 114 except:
111 115 print "Unable to open repository"
112 116 sys.exit(0)
113 117
114 118 if cmd == "checkout" or cmd == "co":
115 119 node = repo.changelog.tip()
116 120 if args:
117 121 node = repo.changelog.lookup(args[0])
118 122 repo.checkout(node)
119 123
120 124 elif cmd == "add":
121 125 repo.add(args)
122 126
123 127 elif cmd == "remove" or cmd == "rm" or cmd == "del" or cmd == "delete":
124 128 repo.remove(args)
125 129
126 130 elif cmd == "commit" or cmd == "checkin" or cmd == "ci":
127 131 if 1:
128 132 if len(args) > 0:
129 133 repo.commit(repo.current, args)
130 134 else:
131 135 repo.commit(repo.current)
132 136
133 137 elif cmd == "import" or cmd == "patch":
134 138 ioptions = {}
135 139 opts = [('p', 'strip', 1, 'path strip'),
136 140 ('b', 'base', "", 'base path'),
137 141 ('q', 'quiet', "", 'silence diff')
138 142 ]
139 143
140 144 args = fancyopts.fancyopts(args, opts, ioptions,
141 145 'hg import [options] <patch names>')
142 146 d = ioptions["base"]
143 147 strip = ioptions["strip"]
144 148 quiet = ioptions["quiet"] and "> /dev/null" or ""
145 149
146 150 for patch in args:
147 151 ui.status("applying %s\n" % patch)
148 152 pf = os.path.join(d, patch)
149 153
150 154 text = ""
151 155 for l in file(pf):
152 156 if l[:3] == "---": break
153 157 text += l
154 158
155 159 if os.system("patch -p%d < %s %s" % (strip, pf, quiet)):
156 160 raise "patch failed!"
157 161 f = os.popen("lsdiff --strip %d %s" % (strip, pf))
158 162 files = filter(None, map(lambda x: x.rstrip(), f.read().splitlines()))
159 163 f.close()
160 164 repo.commit(repo.current, files, text)
161 165
162 166 elif cmd == "status":
163 167 (c, a, d) = repo.diffdir(repo.root, repo.current)
164 168 for f in c: print "C", f
165 169 for f in a: print "?", f
166 170 for f in d: print "R", f
167 171
168 172 elif cmd == "diff":
169 173 revs = []
170 174
171 175 if args:
172 176 doptions = {}
173 177 opts = [('r', 'revision', [], 'revision')]
174 178 args = fancyopts.fancyopts(args, opts, doptions,
175 179 'hg diff [options] [files]')
176 180 revs = map(lambda x: repo.changelog.lookup(x), doptions['revision'])
177 181
178 182 if len(revs) > 2:
179 183 print "too many revisions to diff"
180 184 sys.exit(1)
181 else:
185
186 if os.getcwd() != repo.root:
187 relpath = os.getcwd()[len(repo.root) + 1: ]
188 if not args: args = [ relpath ]
189 else: args = [ os.path.join(relpath, x) for x in args ]
190
182 191 diff(args, *revs)
183 192
184 193 elif cmd == "export":
185 194 node = repo.changelog.lookup(args[0])
186 195 prev = repo.changelog.parents(node)[0]
187 196 diff(None, prev, node)
188 197
189 198 elif cmd == "debugchangegroup":
190 199 newer = repo.newer(map(repo.changelog.lookup, args))
191 200 for chunk in repo.changegroup(newer):
192 201 sys.stdout.write(chunk)
193 202
194 203 elif cmd == "debugaddchangegroup":
195 204 data = sys.stdin.read()
196 205 repo.addchangegroup(data)
197 206
198 207 elif cmd == "addremove":
199 208 (c, a, d) = repo.diffdir(repo.root, repo.current)
200 209 repo.add(a)
201 210 repo.remove(d)
202 211
203 212 elif cmd == "history":
204 213 for i in range(repo.changelog.count()):
205 214 n = repo.changelog.node(i)
206 215 changes = repo.changelog.read(n)
207 216 (p1, p2) = repo.changelog.parents(n)
208 217 (h, h1, h2) = map(hg.hex, (n, p1, p2))
209 218 (i1, i2) = map(repo.changelog.rev, (p1, p2))
210 219 print "rev: %4d:%s" % (i, h)
211 220 print "parents: %4d:%s" % (i1, h1)
212 221 if i2: print " %4d:%s" % (i2, h2)
213 222 print "manifest: %4d:%s" % (repo.manifest.rev(changes[0]),
214 223 hg.hex(changes[0]))
215 224 print "user:", changes[1]
216 225 print "date:", time.asctime(
217 226 time.localtime(float(changes[2].split(' ')[0])))
218 227 print "files:", " ".join(changes[3])
219 228 print "description:"
220 229 print changes[4]
221 230
222 231 elif cmd == "log":
223 232 if args:
224 233 r = repo.file(args[0])
225 234 for i in range(r.count()):
226 235 n = r.node(i)
227 236 (p1, p2) = r.parents(n)
228 237 (h, h1, h2) = map(hg.hex, (n, p1, p2))
229 238 (i1, i2) = map(r.rev, (p1, p2))
230 239 cr = r.linkrev(n)
231 240 cn = hg.hex(repo.changelog.node(cr))
232 241 print "rev: %4d:%s" % (i, h)
233 242 print "changeset: %4d:%s" % (cr, cn)
234 243 print "parents: %4d:%s" % (i1, h1)
235 244 if i2: print " %4d:%s" % (i2, h2)
236 245 else:
237 246 print "missing filename"
238 247
239 248 elif cmd == "dump":
240 249 if args:
241 250 r = repo.file(args[0])
242 251 n = r.tip()
243 252 if len(args) > 1: n = r.lookup(args[1])
244 253 sys.stdout.write(r.read(n))
245 254 else:
246 255 print "missing filename"
247 256
248 257 elif cmd == "dumpmanifest":
249 258 n = repo.manifest.tip()
250 259 if len(args) > 0:
251 260 n = repo.manifest.lookup(args[0])
252 261 m = repo.manifest.read(n)
253 262 files = m.keys()
254 263 files.sort()
255 264
256 265 for f in files:
257 266 print hg.hex(m[f]), f
258 267
259 268 elif cmd == "debughash":
260 269 f = repo.file(args[0])
261 270 print f.encodepath(args[0])
262 271
263 272 elif cmd == "debugindex":
264 273 r = hg.revlog(open, args[0], "")
265 274 print " rev offset length base linkrev"+\
266 275 " p1 p2 nodeid"
267 276 for i in range(r.count()):
268 277 e = r.index[i]
269 278 print "% 6d % 9d % 7d % 5d % 7d %s.. %s.. %s.." % (
270 279 i, e[0], e[1], e[2], e[3],
271 280 hg.hex(e[4][:5]), hg.hex(e[5][:5]), hg.hex(e[6][:5]))
272 281
273 282 elif cmd == "merge":
274 283 if args:
275 284 other = hg.repository(ui, args[0])
276 285 print "retrieving changegroup"
277 286 cg = repo.getchangegroup(other)
278 287 repo.addchangegroup(cg)
279 288 else:
280 289 print "missing source repository"
281 290
282 291 elif cmd == "debugoldmerge":
283 292 if args:
284 293 other = hg.repository(ui, args[0])
285 294 repo.merge(other)
286 295 else:
287 296 print "missing source repository"
288 297
289 298 elif cmd == "verify":
290 299 filelinkrevs = {}
291 300 filenodes = {}
292 301 manifestchangeset = {}
293 302 changesets = revisions = files = 0
294 303
295 304 print "checking changesets"
296 305 for i in range(repo.changelog.count()):
297 306 changesets += 1
298 307 n = repo.changelog.node(i)
299 308 changes = repo.changelog.read(n)
300 309 manifestchangeset[changes[0]] = n
301 310 for f in changes[3]:
302 311 revisions += 1
303 312 filelinkrevs.setdefault(f, []).append(i)
304 313
305 314 print "checking manifests"
306 315 for i in range(repo.manifest.count()):
307 316 n = repo.manifest.node(i)
308 317 ca = repo.changelog.node(repo.manifest.linkrev(n))
309 318 cc = manifestchangeset[n]
310 319 if ca != cc:
311 320 print "manifest %s points to %s, not %s" % \
312 321 (hg.hex(n), hg.hex(ca), hg.hex(cc))
313 322 m = repo.manifest.read(n)
314 323 for f, fn in m.items():
315 324 filenodes.setdefault(f, {})[fn] = 1
316 325
317 326 print "crosschecking files in changesets and manifests"
318 327 for f in filenodes:
319 328 if f not in filelinkrevs:
320 329 print "file %s in manifest but not in changesets"
321 330
322 331 for f in filelinkrevs:
323 332 if f not in filenodes:
324 333 print "file %s in changeset but not in manifest"
325 334
326 335 print "checking files"
327 336 for f in filenodes:
328 337 files += 1
329 338 fl = repo.file(f)
330 339 nodes = {"\0"*20: 1}
331 340 for i in range(fl.count()):
332 341 n = fl.node(i)
333 342
334 343 if n not in filenodes[f]:
335 344 print "%s:%s not in manifests" % (f, hg.hex(n))
336 345 else:
337 346 del filenodes[f][n]
338 347
339 348 flr = fl.linkrev(n)
340 349 if flr not in filelinkrevs[f]:
341 350 print "%s:%s points to unexpected changeset rev %d" \
342 351 % (f, hg.hex(n), fl.linkrev(n))
343 352 else:
344 353 filelinkrevs[f].remove(flr)
345 354
346 355 # verify contents
347 356 t = fl.read(n)
348 357
349 358 # verify parents
350 359 (p1, p2) = fl.parents(n)
351 360 if p1 not in nodes:
352 361 print "%s:%s unknown parent 1 %s" % (f, hg.hex(n), hg.hex(p1))
353 362 if p2 not in nodes:
354 363 print "file %s:%s unknown parent %s" % (f, hg.hex(n), hg.hex(p1))
355 364 nodes[n] = 1
356 365
357 366 # cross-check
358 367 for flr in filelinkrevs[f]:
359 368 print "changeset rev %d not in %s" % (flr, f)
360 369
361 370 for node in filenodes[f]:
362 371 print "node %s in manifests not in %s" % (hg.hex(n), f)
363 372
364 373
365 374 print "%d files, %d changesets, %d total revisions" % (files, changesets,
366 375 revisions)
367 376
368 377 else:
369 378 print "unknown command\n"
370 379 help()
371 380 sys.exit(1)
@@ -1,873 +1,873
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, sha, socket, os, time, base64, re, urllib2
9 9 import urllib
10 10 from mercurial import byterange
11 11 from mercurial.transaction import *
12 12 from mercurial.revlog import *
13 13
class filelog(revlog):
    """Revlog storing the revision history of a single tracked file."""
    def __init__(self, opener, path):
        s = self.encodepath(path)
        revlog.__init__(self, opener, os.path.join("data", s + "i"),
                        os.path.join("data", s))

    def encodepath(self, path):
        """Hash the file path into a short, filesystem-safe store name."""
        s = sha.sha(path).digest()
        s = base64.encodestring(s)[:-3]
        s = re.sub("\+", "%", s)
        s = re.sub("/", "_", s)
        return s

    def read(self, node):
        return self.revision(node)

    def add(self, text, transaction, link, p1=None, p2=None):
        return self.addrevision(text, transaction, link, p1, p2)

    def resolvedag(self, old, new, transaction, link):
        """resolve unmerged heads in our DAG"""
        if old == new: return None
        a = self.ancestor(old, new)
        if old == a: return None
        return self.merge3(old, new, a, transaction, link)

    def merge3(self, my, other, base, transaction, link):
        """perform a 3-way merge and append the result"""
        # fix: tempfile is not imported at module level; import it here
        # so mkstemp does not raise NameError
        import tempfile

        def temp(prefix, node):
            # dump a revision to a temp file for the external merge tool
            (fd, name) = tempfile.mkstemp(prefix)
            f = os.fdopen(fd, "w")
            f.write(self.revision(node))
            f.close()
            return name

        a = temp("local", my)
        b = temp("remote", other)
        c = temp("parent", base)

        # the merge tool is entirely user-supplied via $HGMERGE
        cmd = os.environ["HGMERGE"]
        r = os.system("%s %s %s %s" % (cmd, a, b, c))
        if r:
            raise "Merge failed, implement rollback!"

        t = open(a).read()
        os.unlink(a)
        os.unlink(b)
        os.unlink(c)
        return self.addrevision(t, transaction, link, my, other)

    def merge(self, other, transaction, linkseq, link):
        """perform a merge and resolve resulting heads"""
        (o, n) = self.mergedag(other, transaction, linkseq)
        return self.resolvedag(o, n, transaction, link)
67 67
class manifest(revlog):
    """Revlog holding the manifest: the file-name -> file-revision-id
    mapping recorded by each changeset."""
    def __init__(self, opener):
        self.mapcache = None   # (node, map) of the last read()
        self.listcache = None  # (text, split lines) of the last read()/add()
        self.addlist = None    # lines staged by add() for diff() to reuse
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        """Return the {file: nodeid} map stored at `node` (cached)."""
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        self.listcache = (text, text.splitlines(1))
        # each line is "<file>\0<40-char hex nodeid>"
        for l in self.listcache[1]:
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
        self.mapcache = (node, map)
        return map

    def diff(self, a, b):
        # this is sneaky, as we're not actually using a and b
        if self.listcache and len(self.listcache[0]) == len(a):
            return mdiff.diff(self.listcache[1], self.addlist, 1)
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, transaction, link, p1=None, p2=None):
        """Serialize `map` and append it as a new manifest revision."""
        files = map.keys()
        files.sort()

        self.addlist = ["%s\000%s\n" % (f, hex(map[f])) for f in files]
        text = "".join(self.addlist)

        n = self.addrevision(text, transaction, link, p1, p2)
        self.mapcache = (n, map)
        self.listcache = (text, self.addlist)

        return n
106 106
class changelog(revlog):
    """Revlog of changesets: (manifest id, user, date, files, description)."""
    def __init__(self, opener):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")

    def extract(self, text):
        """Parse raw changeset text into
        (manifest, user, date, files, desc)."""
        if not text:
            return (nullid, "", "0", [], "")
        # header and description are separated by a blank line
        last = text.index("\n\n")
        desc = text[last + 2:]
        l = text[:last].splitlines()
        # header lines: manifest hex, user, date, then the file list
        return (bin(l[0]), l[1], l[2], l[3:], desc)

    def read(self, node):
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None):
        """Append a new changeset entry."""
        # committer identity: $HGUSER, else $EMAIL, else login@fqdn
        user = (os.environ.get("HGUSER") or
                os.environ.get("EMAIL") or
                os.environ.get("LOGNAME", "unknown") + '@' + socket.getfqdn())
        date = "%d %d" % (time.time(), time.timezone)
        list.sort()
        l = [hex(manifest), user, date] + list + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, self.count(), p1, p2)

    def merge3(self, my, other, base):
        # changeset-level merge is not implemented yet
        pass
138 138
class dircache:
    """Cache of (mode, size, mtime) stat data for working-dir files,
    persisted in the store's "dircache" file."""
    def __init__(self, opener, ui):
        self.opener = opener
        self.dirty = 0
        self.ui = ui
        self.map = None  # lazily populated by read()

    def __del__(self):
        # flush pending changes when the cache is garbage collected
        if self.dirty: self.write()

    def __getitem__(self, key):
        try:
            return self.map[key]
        except TypeError:
            # map is still None: load it, then retry the lookup
            self.read()
            return self[key]

    def read(self):
        """Load the on-disk cache (no-op if already loaded)."""
        if self.map is not None: return self.map

        self.map = {}
        try:
            st = self.opener("dircache").read()
        except: return

        # format: 16-byte header (mode, size, mtime, namelen) + file name
        pos = 0
        while pos < len(st):
            e = struct.unpack(">llll", st[pos:pos+16])
            l = e[3]
            pos += 16
            f = st[pos:pos + l]
            self.map[f] = e[:3]
            pos += l

    def update(self, files):
        """Refresh cached stat data for `files` from the filesystem."""
        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            try:
                s = os.stat(f)
                self.map[f] = (s.st_mode, s.st_size, s.st_mtime)
            except (IOError, OSError):
                # fix: os.stat raises OSError, which the old IOError-only
                # handler never caught; also wrap f in a list, because
                # remove() iterates its argument -- a bare string would be
                # processed character by character
                self.remove([f])

    def taint(self, files):
        """Mark `files` with sentinel stat data so the next diffdir
        must do a full content compare."""
        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            self.map[f] = (0, -1, 0)

    def remove(self, files):
        """Drop `files` from the cache."""
        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            try:
                del self.map[f]
            except KeyError:
                self.ui.warn("Not in dircache: %s\n" % f)

    def clear(self):
        self.map = {}
        self.dirty = 1

    def write(self):
        """Serialize the cache back to the store."""
        st = self.opener("dircache", "w")
        for f, e in self.map.items():
            e = struct.pack(">llll", e[0], e[1], e[2], len(f))
            st.write(e + f)
        self.dirty = 0

    def copy(self):
        self.read()
        return self.map.copy()
214 214
# used to avoid circular references so destructors work
def opener(base):
    """Return a file-opening callable rooted at `base` (a local path or
    an http:// URL)."""
    p = base
    def o(path, mode="r"):
        if p[:7] == "http://":
            url = os.path.join(p, urllib.quote(path))
            # NOTE(review): httprangereader is not visibly imported in this
            # module; presumably supplied by the revlog/byterange imports --
            # verify
            return httprangereader(url)

        fn = os.path.join(p, path)

        # break hard links before writing so "cp -al" clones do not see
        # each other's modifications
        if mode != "r" and os.path.isfile(fn):
            s = os.stat(fn)
            if s.st_nlink > 1:
                file(fn + ".tmp", "w").write(file(fn).read())
                os.rename(fn + ".tmp", fn)

        return file(fn, mode)

    return o
234 234
235 235 class localrepository:
236 236 def __init__(self, ui, path=None, create=0):
237 237 self.remote = 0
238 238 if path and path[:7] == "http://":
239 239 self.remote = 1
240 240 self.path = path
241 241 else:
242 242 if not path:
243 243 p = os.getcwd()
244 244 while not os.path.isdir(os.path.join(p, ".hg")):
245 245 p = os.path.dirname(p)
246 246 if p == "/": raise "No repo found"
247 247 path = p
248 248 self.path = os.path.join(path, ".hg")
249 249
250 250 self.root = path
251 251 self.ui = ui
252 252
253 253 if create:
254 254 os.mkdir(self.path)
255 255 os.mkdir(self.join("data"))
256 256
257 257 self.opener = opener(self.path)
258 258 self.manifest = manifest(self.opener)
259 259 self.changelog = changelog(self.opener)
260 260 self.ignorelist = None
261 261
262 262 if not self.remote:
263 263 self.dircache = dircache(self.opener, ui)
264 264 try:
265 265 self.current = bin(self.opener("current").read())
266 266 except IOError:
267 267 self.current = None
268 268
269 269 def setcurrent(self, node):
270 270 self.current = node
271 271 self.opener("current", "w").write(hex(node))
272 272
273 273 def ignore(self, f):
274 274 if self.ignorelist is None:
275 275 self.ignorelist = []
276 276 try:
277 277 l = open(os.path.join(self.root, ".hgignore")).readlines()
278 278 for pat in l:
279 279 if pat != "\n":
280 280 self.ignorelist.append(re.compile(pat[:-1]))
281 281 except IOError: pass
282 282 for pat in self.ignorelist:
283 283 if pat.search(f): return True
284 284 return False
285 285
286 286 def join(self, f):
287 287 return os.path.join(self.path, f)
288 288
289 289 def file(self, f):
290 290 return filelog(self.opener, f)
291 291
292 292 def transaction(self):
293 293 return transaction(self.opener, self.join("journal"))
294 294
295 295 def merge(self, other):
296 296 tr = self.transaction()
297 297 changed = {}
298 298 new = {}
299 299 seqrev = self.changelog.count()
300 300 # some magic to allow fiddling in nested scope
301 301 nextrev = [seqrev]
302 302
303 303 # helpers for back-linking file revisions to local changeset
304 304 # revisions so we can immediately get to changeset from annotate
305 305 def accumulate(text):
306 306 # track which files are added in which changeset and the
307 307 # corresponding _local_ changeset revision
308 308 files = self.changelog.extract(text)[3]
309 309 for f in files:
310 310 changed.setdefault(f, []).append(nextrev[0])
311 311 nextrev[0] += 1
312 312
313 313 def seq(start):
314 314 while 1:
315 315 yield start
316 316 start += 1
317 317
318 318 def lseq(l):
319 319 for r in l:
320 320 yield r
321 321
322 322 # begin the import/merge of changesets
323 323 self.ui.status("merging new changesets\n")
324 324 (co, cn) = self.changelog.mergedag(other.changelog, tr,
325 325 seq(seqrev), accumulate)
326 326 resolverev = self.changelog.count()
327 327
328 328 # is there anything to do?
329 329 if co == cn:
330 330 tr.close()
331 331 return
332 332
333 333 # do we need to resolve?
334 334 simple = (co == self.changelog.ancestor(co, cn))
335 335
336 336 # merge all files changed by the changesets,
337 337 # keeping track of the new tips
338 338 changelist = changed.keys()
339 339 changelist.sort()
340 340 for f in changelist:
341 341 sys.stdout.write(".")
342 342 sys.stdout.flush()
343 343 r = self.file(f)
344 344 node = r.merge(other.file(f), tr, lseq(changed[f]), resolverev)
345 345 if node:
346 346 new[f] = node
347 347 sys.stdout.write("\n")
348 348
349 349 # begin the merge of the manifest
350 350 self.ui.status("merging manifests\n")
351 351 (mm, mo) = self.manifest.mergedag(other.manifest, tr, seq(seqrev))
352 352
353 353 # For simple merges, we don't need to resolve manifests or changesets
354 354 if simple:
355 355 tr.close()
356 356 return
357 357
358 358 ma = self.manifest.ancestor(mm, mo)
359 359
360 360 # resolve the manifest to point to all the merged files
361 361 self.ui.status("resolving manifests\n")
362 362 mmap = self.manifest.read(mm) # mine
363 363 omap = self.manifest.read(mo) # other
364 364 amap = self.manifest.read(ma) # ancestor
365 365 nmap = {}
366 366
367 367 for f, mid in mmap.iteritems():
368 368 if f in omap:
369 369 if mid != omap[f]:
370 370 nmap[f] = new.get(f, mid) # use merged version
371 371 else:
372 372 nmap[f] = new.get(f, mid) # they're the same
373 373 del omap[f]
374 374 elif f in amap:
375 375 if mid != amap[f]:
376 376 pass # we should prompt here
377 377 else:
378 378 pass # other deleted it
379 379 else:
380 380 nmap[f] = new.get(f, mid) # we created it
381 381
382 382 del mmap
383 383
384 384 for f, oid in omap.iteritems():
385 385 if f in amap:
386 386 if oid != amap[f]:
387 387 pass # this is the nasty case, we should prompt
388 388 else:
389 389 pass # probably safe
390 390 else:
391 391 nmap[f] = new.get(f, oid) # remote created it
392 392
393 393 del omap
394 394 del amap
395 395
396 396 node = self.manifest.add(nmap, tr, resolverev, mm, mo)
397 397
398 398 # Now all files and manifests are merged, we add the changed files
399 399 # and manifest id to the changelog
400 400 self.ui.status("committing merge changeset\n")
401 401 new = new.keys()
402 402 new.sort()
403 403 if co == cn: cn = -1
404 404
405 405 edittext = "\n"+"".join(["HG: changed %s\n" % f for f in new])
406 406 edittext = self.ui.edit(edittext)
407 407 n = self.changelog.add(node, new, edittext, tr, co, cn)
408 408
409 409 tr.close()
410 410
411 411 def commit(self, parent, update = None, text = ""):
412 412 tr = self.transaction()
413 413
414 414 try:
415 415 remove = [ l[:-1] for l in self.opener("to-remove") ]
416 416 os.unlink(self.join("to-remove"))
417 417
418 418 except IOError:
419 419 remove = []
420 420
421 421 if update == None:
422 422 update = self.diffdir(self.root, parent)[0]
423 423
424 424 # check in files
425 425 new = {}
426 426 linkrev = self.changelog.count()
427 427 for f in update:
428 428 try:
429 429 t = file(f).read()
430 430 except IOError:
431 431 remove.append(f)
432 432 continue
433 433 r = self.file(f)
434 434 new[f] = r.add(t, tr, linkrev)
435 435
436 436 # update manifest
437 437 mmap = self.manifest.read(self.manifest.tip())
438 438 mmap.update(new)
439 439 for f in remove:
440 440 del mmap[f]
441 441 mnode = self.manifest.add(mmap, tr, linkrev)
442 442
443 443 # add changeset
444 444 new = new.keys()
445 445 new.sort()
446 446
447 447 edittext = text + "\n"+"".join(["HG: changed %s\n" % f for f in new])
448 448 edittext += "".join(["HG: removed %s\n" % f for f in remove])
449 449 edittext = self.ui.edit(edittext)
450 450
451 451 n = self.changelog.add(mnode, new, edittext, tr)
452 452 tr.close()
453 453
454 454 self.setcurrent(n)
455 455 self.dircache.update(new)
456 456 self.dircache.remove(remove)
457 457
458 458 def checkdir(self, path):
459 459 d = os.path.dirname(path)
460 460 if not d: return
461 461 if not os.path.isdir(d):
462 462 self.checkdir(d)
463 463 os.mkdir(d)
464 464
465 465 def checkout(self, node):
466 466 # checkout is really dumb at the moment
467 467 # it ought to basically merge
468 468 change = self.changelog.read(node)
469 469 mmap = self.manifest.read(change[0])
470 470
471 471 l = mmap.keys()
472 472 l.sort()
473 473 stats = []
474 474 for f in l:
475 475 r = self.file(f)
476 476 t = r.revision(mmap[f])
477 477 try:
478 478 file(f, "w").write(t)
479 479 except:
480 480 self.checkdir(f)
481 481 file(f, "w").write(t)
482 482
483 483 self.setcurrent(node)
484 484 self.dircache.clear()
485 485 self.dircache.update(l)
486 486
487 487 def diffdir(self, path, changeset):
488 488 changed = []
489 489 mf = {}
490 490 added = []
491 491
492 492 if changeset:
493 493 change = self.changelog.read(changeset)
494 494 mf = self.manifest.read(change[0])
495 495
496 496 if changeset == self.current:
497 497 dc = self.dircache.copy()
498 498 else:
499 499 dc = dict.fromkeys(mf)
500 500
501 501 def fcmp(fn):
502 t1 = file(fn).read()
502 t1 = file(os.path.join(self.root, fn)).read()
503 503 t2 = self.file(fn).revision(mf[fn])
504 504 return cmp(t1, t2)
505 505
506 506 for dir, subdirs, files in os.walk(self.root):
507 507 d = dir[len(self.root)+1:]
508 508 if ".hg" in subdirs: subdirs.remove(".hg")
509 509
510 510 for f in files:
511 511 fn = os.path.join(d, f)
512 try: s = os.stat(fn)
512 try: s = os.stat(os.path.join(self.root, fn))
513 513 except: continue
514 514 if fn in dc:
515 515 c = dc[fn]
516 516 del dc[fn]
517 517 if not c:
518 518 if fcmp(fn):
519 519 changed.append(fn)
520 520 elif c[1] != s.st_size:
521 521 changed.append(fn)
522 522 elif c[0] != s.st_mode or c[2] != s.st_mtime:
523 523 if fcmp(fn):
524 524 changed.append(fn)
525 525 else:
526 526 if self.ignore(fn): continue
527 527 added.append(fn)
528 528
529 529 deleted = dc.keys()
530 530 deleted.sort()
531 531
532 532 return (changed, added, deleted)
533 533
534 534 def diffrevs(self, node1, node2):
535 535 changed, added = [], []
536 536
537 537 change = self.changelog.read(node1)
538 538 mf1 = self.manifest.read(change[0])
539 539 change = self.changelog.read(node2)
540 540 mf2 = self.manifest.read(change[0])
541 541
542 542 for fn in mf2:
543 543 if mf1.has_key(fn):
544 544 if mf1[fn] != mf2[fn]:
545 545 changed.append(fn)
546 546 del mf1[fn]
547 547 else:
548 548 added.append(fn)
549 549
550 550 deleted = mf1.keys()
551 551 deleted.sort()
552 552
553 553 return (changed, added, deleted)
554 554
555 555 def add(self, list):
556 556 self.dircache.taint(list)
557 557
558 558 def remove(self, list):
559 559 dl = self.opener("to-remove", "a")
560 560 for f in list:
561 561 dl.write(f + "\n")
562 562
563 563 def branches(self, nodes):
564 564 if not nodes: nodes = [self.changelog.tip()]
565 565 b = []
566 566 for n in nodes:
567 567 t = n
568 568 while n:
569 569 p = self.changelog.parents(n)
570 570 if p[1] != nullid or p[0] == nullid:
571 571 b.append((t, n, p[0], p[1]))
572 572 break
573 573 n = p[0]
574 574 return b
575 575
576 576 def between(self, pairs):
577 577 r = []
578 578
579 579 for top, bottom in pairs:
580 580 n, l, i = top, [], 0
581 581 f = 1
582 582
583 583 while n != bottom:
584 584 p = self.changelog.parents(n)[0]
585 585 if i == f:
586 586 l.append(n)
587 587 f = f * 2
588 588 n = p
589 589 i += 1
590 590
591 591 r.append(l)
592 592
593 593 return r
594 594
595 595 def newer(self, nodes):
596 596 m = {}
597 597 nl = []
598 598 cl = self.changelog
599 599 t = l = cl.count()
600 600 for n in nodes:
601 601 l = min(l, cl.rev(n))
602 602 for p in cl.parents(n):
603 603 m[p] = 1
604 604
605 605 for i in xrange(l, t):
606 606 n = cl.node(i)
607 607 for p in cl.parents(n):
608 608 if p in m and n not in m:
609 609 m[n] = 1
610 610 nl.append(n)
611 611
612 612 return nl
613 613
614 614 def getchangegroup(self, remote):
615 615 tip = remote.branches([])[0]
616 616 cl = self.changelog
617 617 unknown = [tip]
618 618 search = []
619 619 fetch = []
620 620
621 621 if tip[0] == self.changelog.tip():
622 622 return None
623 623
624 624 while unknown:
625 625 n = unknown.pop(0)
626 626 if n == nullid: break
627 627 if n[1] and cl.nodemap.has_key(n[1]): # do we know the base?
628 628 search.append(n) # schedule branch range for scanning
629 629 else:
630 630 for b in remote.branches([n[2], n[3]]):
631 631 if cl.nodemap.has_key(b[0]):
632 632 fetch.append(n[1]) # earliest unknown
633 633 else:
634 634 unknown.append(b)
635 635
636 636 while search:
637 637 n = search.pop(0)
638 638 l = remote.between([(n[0], n[1])])[0]
639 639 p = n[0]
640 640 f = 1
641 641 for i in l + [n[1]]:
642 642 if self.changelog.nodemap.has_key(i):
643 643 if f <= 4:
644 644 fetch.append(p)
645 645 else:
646 646 search.append((p, i))
647 647 p, f = i, f * 2
648 648
649 649 return remote.changegroup(fetch)
650 650
651 651 def changegroup(self, basenodes):
652 652 nodes = self.newer(basenodes)
653 653
654 654 # construct the link map
655 655 linkmap = {}
656 656 for n in nodes:
657 657 linkmap[self.changelog.rev(n)] = n
658 658
659 659 # construct a list of all changed files
660 660 changed = {}
661 661 for n in nodes:
662 662 c = self.changelog.read(n)
663 663 for f in c[3]:
664 664 changed[f] = 1
665 665 changed = changed.keys()
666 666 changed.sort()
667 667
668 668 # the changegroup is changesets + manifests + all file revs
669 669 revs = [ self.changelog.rev(n) for n in nodes ]
670 670
671 671 yield self.changelog.group(linkmap)
672 672 yield self.manifest.group(linkmap)
673 673
674 674 for f in changed:
675 675 g = self.file(f).group(linkmap)
676 676 if not g: raise "couldn't find change to %s" % f
677 677 l = struct.pack(">l", len(f))
678 678 yield "".join([l, f, g])
679 679
680 680 def addchangegroup(self, data):
681 681 def getlen(data, pos):
682 682 return struct.unpack(">l", data[pos:pos + 4])[0]
683 683
684 684 if not data: return
685 685
686 686 tr = self.transaction()
687 687 simple = True
688 688
689 689 print "merging changesets"
690 690 # pull off the changeset group
691 691 l = getlen(data, 0)
692 692 csg = data[0:l]
693 693 pos = l
694 694 co = self.changelog.tip()
695 695 cn = self.changelog.addgroup(csg, lambda x: self.changelog.count(), tr)
696 696
697 697 print "merging manifests"
698 698 # pull off the manifest group
699 699 l = getlen(data, pos)
700 700 mfg = data[pos: pos + l]
701 701 pos += l
702 702 mo = self.manifest.tip()
703 703 mn = self.manifest.addgroup(mfg, lambda x: self.changelog.rev(x), tr)
704 704
705 705 # do we need a resolve?
706 706 if self.changelog.ancestor(co, cn) != co:
707 707 print "NEED RESOLVE"
708 708 simple = False
709 709 resolverev = self.changelog.count()
710 710
711 711 # process the files
712 712 print "merging files"
713 713 new = {}
714 714 while pos < len(data):
715 715 l = getlen(data, pos)
716 716 pos += 4
717 717 f = data[pos:pos + l]
718 718 pos += l
719 719
720 720 l = getlen(data, pos)
721 721 fg = data[pos: pos + l]
722 722 pos += l
723 723
724 724 fl = self.file(f)
725 725 o = fl.tip()
726 726 n = fl.addgroup(fg, lambda x: self.changelog.rev(x), tr)
727 727 if not simple:
728 728 new[fl] = fl.resolvedag(o, n, tr, resolverev)
729 729
730 730 # For simple merges, we don't need to resolve manifests or changesets
731 731 if simple:
732 732 tr.close()
733 733 return
734 734
735 735 # resolve the manifest to point to all the merged files
736 736 self.ui.status("resolving manifests\n")
737 737 ma = self.manifest.ancestor(mm, mo)
738 738 mmap = self.manifest.read(mm) # mine
739 739 omap = self.manifest.read(mo) # other
740 740 amap = self.manifest.read(ma) # ancestor
741 741 nmap = {}
742 742
743 743 for f, mid in mmap.iteritems():
744 744 if f in omap:
745 745 if mid != omap[f]:
746 746 nmap[f] = new.get(f, mid) # use merged version
747 747 else:
748 748 nmap[f] = new.get(f, mid) # they're the same
749 749 del omap[f]
750 750 elif f in amap:
751 751 if mid != amap[f]:
752 752 pass # we should prompt here
753 753 else:
754 754 pass # other deleted it
755 755 else:
756 756 nmap[f] = new.get(f, mid) # we created it
757 757
758 758 del mmap
759 759
760 760 for f, oid in omap.iteritems():
761 761 if f in amap:
762 762 if oid != amap[f]:
763 763 pass # this is the nasty case, we should prompt
764 764 else:
765 765 pass # probably safe
766 766 else:
767 767 nmap[f] = new.get(f, oid) # remote created it
768 768
769 769 del omap
770 770 del amap
771 771
772 772 node = self.manifest.add(nmap, tr, resolverev, mm, mo)
773 773
774 774 # Now all files and manifests are merged, we add the changed files
775 775 # and manifest id to the changelog
776 776 self.ui.status("committing merge changeset\n")
777 777 new = new.keys()
778 778 new.sort()
779 779 if co == cn: cn = -1
780 780
781 781 edittext = "\n"+"".join(["HG: changed %s\n" % f for f in new])
782 782 edittext = self.ui.edit(edittext)
783 783 n = self.changelog.add(node, new, edittext, tr, co, cn)
784 784
785 785 tr.close()
786 786
787 787 class remoterepository:
788 788 def __init__(self, ui, path):
789 789 self.url = path.replace("hg://", "http://", 1)
790 790 self.ui = ui
791 791
792 792 def do_cmd(self, cmd, **args):
793 793 q = {"cmd": cmd}
794 794 q.update(args)
795 795 qs = urllib.urlencode(q)
796 796 cu = "%s?%s" % (self.url, qs)
797 797 return urllib.urlopen(cu).read()
798 798
799 799 def branches(self, nodes):
800 800 n = " ".join(map(hex, nodes))
801 801 d = self.do_cmd("branches", nodes=n)
802 802 br = [ map(bin, b.split(" ")) for b in d.splitlines() ]
803 803 return br
804 804
805 805 def between(self, pairs):
806 806 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
807 807 d = self.do_cmd("between", pairs=n)
808 808 p = [ map(bin, l.split(" ")) for l in d.splitlines() ]
809 809 return p
810 810
811 811 def changegroup(self, nodes):
812 812 n = " ".join(map(hex, nodes))
813 813 d = self.do_cmd("changegroup", roots=n)
814 814 return zlib.decompress(d)
815 815
816 816 def repository(ui, path=None, create=0):
817 817 if path and path[:5] == "hg://":
818 818 return remoterepository(ui, path)
819 819 else:
820 820 return localrepository(ui, path, create)
821 821
822 822 class ui:
823 823 def __init__(self, verbose=False, debug=False):
824 824 self.verbose = verbose
825 825 def write(self, *args):
826 826 for a in args:
827 827 sys.stdout.write(str(a))
828 828 def prompt(self, msg, pat):
829 829 while 1:
830 830 sys.stdout.write(msg)
831 831 r = sys.stdin.readline()[:-1]
832 832 if re.match(pat, r):
833 833 return r
834 834 def status(self, *msg):
835 835 self.write(*msg)
836 836 def warn(self, msg):
837 837 self.write(*msg)
838 838 def note(self, msg):
839 839 if self.verbose: self.write(*msg)
840 840 def debug(self, msg):
841 841 if self.debug: self.write(*msg)
842 842 def edit(self, text):
843 843 (fd, name) = tempfile.mkstemp("hg")
844 844 f = os.fdopen(fd, "w")
845 845 f.write(text)
846 846 f.close()
847 847
848 848 editor = os.environ.get("EDITOR", "vi")
849 849 r = os.system("%s %s" % (editor, name))
850 850 if r:
851 851 raise "Edit failed!"
852 852
853 853 t = open(name).read()
854 854 t = re.sub("(?m)^HG:.*\n", "", t)
855 855
856 856 return t
857 857
858 858
859 859 class httprangereader:
860 860 def __init__(self, url):
861 861 self.url = url
862 862 self.pos = 0
863 863 def seek(self, pos):
864 864 self.pos = pos
865 865 def read(self, bytes=None):
866 866 opener = urllib2.build_opener(byterange.HTTPRangeHandler())
867 867 urllib2.install_opener(opener)
868 868 req = urllib2.Request(self.url)
869 869 end = ''
870 870 if bytes: end = self.pos + bytes
871 871 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
872 872 f = urllib2.urlopen(req)
873 873 return f.read()
@@ -1,77 +1,75
1 1 #!/usr/bin/python
2 2 import difflib, struct
3 3 from cStringIO import StringIO
4 4
5 def unidiff(a, b, fn):
5 def unidiff(a, ad, b, bd, fn):
6 6 if not a and not b: return ""
7 7 a = a.splitlines(1)
8 8 b = b.splitlines(1)
9 l = list(difflib.unified_diff(a, b, fn, fn))
9 l = list(difflib.unified_diff(a, b, "a/" + fn, "b/" + fn, ad, bd))
10 10 return "".join(l)
11 11
12 12 def textdiff(a, b):
13 13 return diff(a.splitlines(1), b.splitlines(1))
14 14
15 15 def sortdiff(a, b):
16 16 la = lb = 0
17 17
18 18 while 1:
19 19 if la >= len(a) or lb >= len(b): break
20 20 if b[lb] < a[la]:
21 21 si = lb
22 22 while lb < len(b) and b[lb] < a[la] : lb += 1
23 23 yield "insert", la, la, si, lb
24 24 elif a[la] < b[lb]:
25 25 si = la
26 26 while la < len(a) and a[la] < b[lb]: la += 1
27 27 yield "delete", si, la, lb, lb
28 28 else:
29 29 la += 1
30 30 lb += 1
31 31
32 si = lb
33 while lb < len(b):
34 lb += 1
35 yield "insert", la, la, si, lb
32 if lb < len(b):
33 yield "insert", la, la, lb, len(b)
36 34
37 si = la
38 while la < len(a):
39 la += 1
40 yield "delete", si, la, lb, lb
35 if la < len(a):
36 yield "delete", la, len(a), lb, lb
41 37
42 38 def diff(a, b, sorted=0):
43 39 bin = []
44 40 p = [0]
45 41 for i in a: p.append(p[-1] + len(i))
46 42
47 43 if sorted:
48 44 d = sortdiff(a, b)
49 45 else:
50 46 d = difflib.SequenceMatcher(None, a, b).get_opcodes()
51 47
52 48 for o, m, n, s, t in d:
53 49 if o == 'equal': continue
54 50 s = "".join(b[s:t])
55 51 bin.append(struct.pack(">lll", p[m], p[n], len(s)) + s)
56 52
57 53 return "".join(bin)
58 54
59 55 def patch(a, bin):
60 56 last = pos = 0
61 57 r = []
62 58
59 c = 0
63 60 while pos < len(bin):
64 61 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
65 62 pos += 12
66 63 r.append(a[last:p1])
67 64 r.append(bin[pos:pos + l])
68 65 pos += l
69 66 last = p2
67 c += 1
70 68 r.append(a[last:])
71 69
72 70 return "".join(r)
73 71
74 72
75 73
76 74
77 75
@@ -1,412 +1,414
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # This provides efficient delta storage with O(1) retrieve and append
4 4 # and O(changes) merge between branches
5 5 #
6 6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
7 7 #
8 8 # This software may be used and distributed according to the terms
9 9 # of the GNU General Public License, incorporated herein by reference.
10 10
11 11 import zlib, struct, sha, os, tempfile, binascii
12 12 from mercurial import mdiff
13 13
14 14 def hex(node): return binascii.hexlify(node)
15 15 def bin(node): return binascii.unhexlify(node)
16 16
17 17 def compress(text):
18 18 return zlib.compress(text)
19 19
20 20 def decompress(bin):
21 21 return zlib.decompress(bin)
22 22
23 23 def hash(text, p1, p2):
24 24 l = [p1, p2]
25 25 l.sort()
26 26 return sha.sha(l[0] + l[1] + text).digest()
27 27
28 28 nullid = "\0" * 20
29 29 indexformat = ">4l20s20s20s"
30 30
31 31 class revlog:
32 32 def __init__(self, opener, indexfile, datafile):
33 33 self.indexfile = indexfile
34 34 self.datafile = datafile
35 35 self.index = []
36 36 self.opener = opener
37 37 self.cache = None
38 38 self.nodemap = {nullid: -1}
39 39 # read the whole index for now, handle on-demand later
40 40 try:
41 41 n = 0
42 42 i = self.opener(self.indexfile).read()
43 43 s = struct.calcsize(indexformat)
44 44 for f in range(0, len(i), s):
45 45 # offset, size, base, linkrev, p1, p2, nodeid
46 46 e = struct.unpack(indexformat, i[f:f + s])
47 47 self.nodemap[e[6]] = n
48 48 self.index.append(e)
49 49 n += 1
50 50 except IOError: pass
51 51
52 52 def tip(self): return self.node(len(self.index) - 1)
53 53 def count(self): return len(self.index)
54 54 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
55 55 def rev(self, node): return self.nodemap[node]
56 56 def linkrev(self, node): return self.index[self.nodemap[node]][3]
57 57 def parents(self, node):
58 58 if node == nullid: return (nullid, nullid)
59 59 return self.index[self.nodemap[node]][4:6]
60 60
61 61 def start(self, rev): return self.index[rev][0]
62 62 def length(self, rev): return self.index[rev][1]
63 63 def end(self, rev): return self.start(rev) + self.length(rev)
64 64 def base(self, rev): return self.index[rev][2]
65 65
66 66 def lookup(self, id):
67 67 try:
68 68 rev = int(id)
69 69 return self.node(rev)
70 70 except ValueError:
71 71 c = []
72 72 for n in self.nodemap:
73 73 if id in hex(n):
74 74 c.append(n)
75 75 if len(c) > 1: raise KeyError("Ambiguous identifier")
76 76 if len(c) < 1: raise KeyError
77 77 return c[0]
78 78
79 79 return None
80 80
81 81 def revisions(self, list):
82 82 # this can be optimized to do spans, etc
83 83 # be stupid for now
84 84 for node in list:
85 85 yield self.revision(node)
86 86
87 87 def diff(self, a, b):
88 88 return mdiff.textdiff(a, b)
89 89
90 90 def patch(self, text, patch):
91 91 return mdiff.patch(text, patch)
92 92
93 93 def revision(self, node):
94 94 if node == nullid: return ""
95 95 if self.cache and self.cache[0] == node: return self.cache[2]
96 96
97 97 text = None
98 98 rev = self.rev(node)
99 99 base = self.base(rev)
100 100 start = self.start(base)
101 101 end = self.end(rev)
102 102
103 103 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
104 104 base = self.cache[1]
105 105 start = self.start(base + 1)
106 106 text = self.cache[2]
107 107 last = 0
108 108
109 109 f = self.opener(self.datafile)
110 110 f.seek(start)
111 111 data = f.read(end - start)
112 112
113 113 if not text:
114 114 last = self.length(base)
115 115 text = decompress(data[:last])
116 116
117 for r in range(base + 1, rev + 1):
117 for r in xrange(base + 1, rev + 1):
118 118 s = self.length(r)
119 119 b = decompress(data[last:last + s])
120 120 text = self.patch(text, b)
121 121 last = last + s
122 122
123 123 (p1, p2) = self.parents(node)
124 124 if node != hash(text, p1, p2):
125 125 raise "integrity check failed on %s:%d" % (self.datafile, rev)
126 126
127 127 self.cache = (node, rev, text)
128 128 return text
129 129
130 130 def addrevision(self, text, transaction, link, p1=None, p2=None):
131 131 if text is None: text = ""
132 132 if p1 is None: p1 = self.tip()
133 133 if p2 is None: p2 = nullid
134 134
135 135 node = hash(text, p1, p2)
136 136
137 137 n = self.count()
138 138 t = n - 1
139 139
140 140 if n:
141 start = self.start(self.base(t))
141 base = self.base(t)
142 start = self.start(base)
142 143 end = self.end(t)
143 144 prev = self.revision(self.tip())
144 145 data = compress(self.diff(prev, text))
146 dist = end - start + len(data)
145 147
146 148 # full versions are inserted when the needed deltas
147 149 # become comparable to the uncompressed text
148 if not n or (end + len(data) - start) > len(text) * 2:
150 if not n or dist > len(text) * 2:
149 151 data = compress(text)
150 152 base = n
151 153 else:
152 154 base = self.base(t)
153 155
154 156 offset = 0
155 157 if t >= 0:
156 158 offset = self.end(t)
157 159
158 160 e = (offset, len(data), base, link, p1, p2, node)
159 161
160 162 self.index.append(e)
161 163 self.nodemap[node] = n
162 164 entry = struct.pack(indexformat, *e)
163 165
164 166 transaction.add(self.datafile, e[0])
165 167 self.opener(self.datafile, "a").write(data)
166 168 transaction.add(self.indexfile, n * len(entry))
167 169 self.opener(self.indexfile, "a").write(entry)
168 170
169 171 self.cache = (node, n, text)
170 172 return node
171 173
172 174 def ancestor(self, a, b):
173 175 def expand(list, map):
174 176 a = []
175 177 while list:
176 178 n = list.pop(0)
177 179 map[n] = 1
178 180 yield n
179 181 for p in self.parents(n):
180 182 if p != nullid and p not in map:
181 183 list.append(p)
182 184 yield nullid
183 185
184 186 amap = {}
185 187 bmap = {}
186 188 ag = expand([a], amap)
187 189 bg = expand([b], bmap)
188 190 adone = bdone = 0
189 191
190 192 while not adone or not bdone:
191 193 if not adone:
192 194 an = ag.next()
193 195 if an == nullid:
194 196 adone = 1
195 197 elif an in bmap:
196 198 return an
197 199 if not bdone:
198 200 bn = bg.next()
199 201 if bn == nullid:
200 202 bdone = 1
201 203 elif bn in amap:
202 204 return bn
203 205
204 206 return nullid
205 207
206 208 def mergedag(self, other, transaction, linkseq, accumulate = None):
207 209 """combine the nodes from other's DAG into ours"""
208 210 old = self.tip()
209 211 i = self.count()
210 212 l = []
211 213
212 214 # merge the other revision log into our DAG
213 215 for r in range(other.count()):
214 216 id = other.node(r)
215 217 if id not in self.nodemap:
216 218 (xn, yn) = other.parents(id)
217 219 l.append((id, xn, yn))
218 220 self.nodemap[id] = i
219 221 i += 1
220 222
221 223 # merge node date for new nodes
222 224 r = other.revisions([e[0] for e in l])
223 225 for e in l:
224 226 t = r.next()
225 227 if accumulate: accumulate(t)
226 228 self.addrevision(t, transaction, linkseq.next(), e[1], e[2])
227 229
228 230 # return the unmerged heads for later resolving
229 231 return (old, self.tip())
230 232
231 233 def group(self, linkmap):
232 234 # given a list of changeset revs, return a set of deltas and
233 235 # metadata corresponding to nodes the first delta is
234 236 # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
235 237 # have this parent as it has all history before these
236 238 # changesets. parent is parent[0]
237 239
238 240 revs = []
239 241 needed = {}
240 242
241 243 # find file nodes/revs that match changeset revs
242 244 for i in xrange(0, self.count()):
243 245 if self.index[i][3] in linkmap:
244 246 revs.append(i)
245 247 needed[i] = 1
246 248
247 249 # if we don't have any revisions touched by these changesets, bail
248 250 if not revs: return struct.pack(">l", 0)
249 251
250 252 # add the parent of the first rev
251 253 p = self.parents(self.node(revs[0]))[0]
252 254 revs.insert(0, self.rev(p))
253 255
254 256 # for each delta that isn't contiguous in the log, we need to
255 257 # reconstruct the base, reconstruct the result, and then
256 258 # calculate the delta. We also need to do this where we've
257 259 # stored a full version and not a delta
258 260 for i in xrange(0, len(revs) - 1):
259 261 a, b = revs[i], revs[i + 1]
260 262 if a + 1 != b or self.base(b) == b:
261 263 for j in xrange(self.base(a), a + 1):
262 264 needed[j] = 1
263 265 for j in xrange(self.base(b), b + 1):
264 266 needed[j] = 1
265 267
266 268 # calculate spans to retrieve from datafile
267 269 needed = needed.keys()
268 270 needed.sort()
269 271 spans = []
270 272 for n in needed:
271 273 if n < 0: continue
272 274 o = self.start(n)
273 275 l = self.length(n)
274 276 spans.append((o, l, [(n, l)]))
275 277
276 278 # merge spans
277 279 merge = [spans.pop(0)]
278 280 while spans:
279 281 e = spans.pop(0)
280 282 f = merge[-1]
281 283 if e[0] == f[0] + f[1]:
282 284 merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
283 285 else:
284 286 merge.append(e)
285 287
286 288 # read spans in, divide up chunks
287 289 chunks = {}
288 290 for span in merge:
289 291 # we reopen the file for each span to make http happy for now
290 292 f = self.opener(self.datafile)
291 293 f.seek(span[0])
292 294 data = f.read(span[1])
293 295
294 296 # divide up the span
295 297 pos = 0
296 298 for r, l in span[2]:
297 299 chunks[r] = data[pos: pos + l]
298 300 pos += l
299 301
300 302 # helper to reconstruct intermediate versions
301 303 def construct(text, base, rev):
302 304 for r in range(base + 1, rev + 1):
303 305 b = decompress(chunks[r])
304 306 text = self.patch(text, b)
305 307 return text
306 308
307 309 # build deltas
308 310 deltas = []
309 311 for d in range(0, len(revs) - 1):
310 312 a, b = revs[d], revs[d + 1]
311 313 n = self.node(b)
312 314
313 315 if a + 1 != b or self.base(b) == b:
314 316 if a >= 0:
315 317 base = self.base(a)
316 318 ta = decompress(chunks[self.base(a)])
317 319 ta = construct(ta, base, a)
318 320 else:
319 321 ta = ""
320 322
321 323 base = self.base(b)
322 324 if a > base:
323 325 base = a
324 326 tb = ta
325 327 else:
326 328 tb = decompress(chunks[self.base(b)])
327 329 tb = construct(tb, base, b)
328 330 d = self.diff(ta, tb)
329 331 else:
330 332 d = decompress(chunks[b])
331 333
332 334 p = self.parents(n)
333 335 meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
334 336 l = struct.pack(">l", len(meta) + len(d) + 4)
335 337 deltas.append(l + meta + d)
336 338
337 339 l = struct.pack(">l", sum(map(len, deltas)) + 4)
338 340 deltas.insert(0, l)
339 341 return "".join(deltas)
340 342
341 343 def addgroup(self, data, linkmapper, transaction):
342 344 # given a set of deltas, add them to the revision log. the
343 345 # first delta is against its parent, which should be in our
344 346 # log, the rest are against the previous delta.
345 347
346 348 if len(data) <= 4: return
347 349
348 350 # retrieve the parent revision of the delta chain
349 351 chain = data[28:48]
350 352 text = self.revision(chain)
351 353
352 354 # track the base of the current delta log
353 355 r = self.count()
354 356 t = r - 1
355 357
356 358 base = prev = -1
357 359 start = end = 0
358 360 if r:
359 361 start = self.start(self.base(t))
360 362 end = self.end(t)
361 363 measure = self.length(self.base(t))
362 364 base = self.base(t)
363 365 prev = self.tip()
364 366
365 367 transaction.add(self.datafile, end)
366 368 transaction.add(self.indexfile, r * struct.calcsize(indexformat))
367 369 dfh = self.opener(self.datafile, "a")
368 370 ifh = self.opener(self.indexfile, "a")
369 371
370 372 # loop through our set of deltas
371 373 pos = 4
372 374 while pos < len(data):
373 375 l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
374 376 data[pos:pos+84])
375 377 link = linkmapper(cs)
376 378 delta = data[pos + 84:pos + l]
377 379 pos += l
378 380
379 381 # full versions are inserted when the needed deltas become
380 382 # comparable to the uncompressed text or when the previous
381 383 # version is not the one we have a delta against. We use
382 384 # the size of the previous full rev as a proxy for the
383 385 # current size.
384 386
385 387 if chain == prev:
386 388 cdelta = compress(delta)
387 389
388 390 if chain != prev or (end - start + len(cdelta)) > measure * 2:
389 391 # flush our writes here so we can read it in revision
390 392 dfh.flush()
391 393 ifh.flush()
392 394 text = self.revision(self.node(t))
393 395 text = self.patch(text, delta)
394 396 chk = self.addrevision(text, transaction, link, p1, p2)
395 397 if chk != node:
396 398 raise "consistency error adding group"
397 399 measure = len(text)
398 400 else:
399 401 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
400 402 self.index.append(e)
401 403 self.nodemap[node] = r
402 404 dfh.write(cdelta)
403 405 ifh.write(struct.pack(indexformat, *e))
404 406
405 407 t, r = r, r + 1
406 408 chain = prev
407 409 start = self.start(self.base(t))
408 410 end = self.end(t)
409 411
410 412 dfh.close()
411 413 ifh.close()
412 414 return node
General Comments 0
You need to be logged in to leave comments. Login now