##// END OF EJS Templates
Add changegroup support
mpm@selenic.com -
r46:93e868fa default
parent child Browse files
Show More
@@ -1,342 +1,352 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # mercurial - a minimal scalable distributed SCM
4 4 # v0.4d "oedipa maas"
5 5 #
6 6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
7 7 #
8 8 # This software may be used and distributed according to the terms
9 9 # of the GNU General Public License, incorporated herein by reference.
10 10
11 # the psyco compiler makes commits about twice as fast
12 try:
13 import psyco
14 psyco.full()
15 except:
16 pass
11 # the psyco compiler makes commits a bit faster
12 # and makes changegroup merge about 20 times slower!
13 # try:
14 # import psyco
15 # psyco.full()
16 # except:
17 # pass
17 18
18 19 import sys, os, time
19 20 from mercurial import hg, mdiff, fancyopts
20 21
def help():
    """Print a summary of the supported commands to stdout."""
    print """\
commands:

 init create a new repository in this directory
 branch <path> create a branch of <path> in this directory
 merge <path> merge changes from <path> into local repository
 checkout [changeset] checkout the latest or given changeset
 status show new, missing, and changed files in working dir
 add [files...] add the given files in the next commit
 remove [files...] remove the given files in the next commit
 addremove add all new files, delete all missing files
 commit commit all changes to the repository
 history show changeset history
 log <file> show revision history of a single file
 dump <file> [rev] dump the latest or given revision of a file
 dumpmanifest [rev] dump the latest or given revision of the manifest
 diff [files...] diff working directory (or selected files)
"""
40 41
def filterfiles(list, files):
    """Filter *list* down to the entries selected by *files*.

    An entry is kept when it exactly matches one of *files*, or when it
    lies underneath one of them treated as a directory prefix.
    """
    kept = [name for name in list if name in files]

    for pat in files:
        prefix = pat
        if prefix[-1] != os.sep:
            prefix += os.sep
        kept += [name for name in list if name.startswith(prefix)]
    return kept
48 49
def diff(files = None, node1 = None, node2 = None):
    """Write a unified diff to stdout.

    With node2: diff changeset node1 against changeset node2.  Without:
    diff node1 (default: the current checkout) against the working
    directory.  files, if given, restricts output to those paths
    (see filterfiles).  Uses the global `repo`.
    """
    if node2:
        change = repo.changelog.read(node2)
        mmap2 = repo.manifest.read(change[0])
        (c, a, d) = repo.diffrevs(node1, node2)
        # "new" side comes from the second changeset
        def read(f): return repo.file(f).read(mmap2[f])
    else:
        if not node1:
            node1 = repo.current
        (c, a, d) = repo.diffdir(repo.root, node1)
        # "new" side comes from the working directory
        def read(f): return file(f).read()

    change = repo.changelog.read(node1)
    mmap = repo.manifest.read(change[0])

    if files:
        (c, a, d) = map(lambda x: filterfiles(x, files), (c, a, d))

    # changed: old contents from the store, new from read()
    for f in c:
        to = repo.file(f).read(mmap[f])
        tn = read(f)
        sys.stdout.write(mdiff.unidiff(to, tn, f))
    # added: diff against empty
    for f in a:
        to = ""
        tn = read(f)
        sys.stdout.write(mdiff.unidiff(to, tn, f))
    # deleted: diff to empty
    for f in d:
        to = repo.file(f).read(mmap[f])
        tn = ""
        sys.stdout.write(mdiff.unidiff(to, tn, f))
80 81
81 82
# global option table: (short, long, default, description)
options = {}
opts = [('v', 'verbose', None, 'verbose'),
        ('d', 'debug', None, 'debug')]

args = fancyopts.fancyopts(sys.argv[1:], opts, options,
                           'hg [options] <command> [command options] [files]')

# the first remaining argument is the command name; with no arguments we
# fall through to the unknown-command branch, which prints help.
# args is a list, so only IndexError can occur here -- the original bare
# except would also have hidden unrelated failures.
try:
    cmd = args[0]
    args = args[1:]
except IndexError:
    cmd = ""

ui = hg.ui(options["verbose"], options["debug"])
96 97
# commands that must run without an existing repository
if cmd == "init":
    repo = hg.repository(ui, ".", create=1)
    sys.exit(0)
elif cmd == "branch" or cmd == "clone":
    # cheap local branch: hard-link the whole .hg directory;
    # opener() breaks the links on write
    os.system("cp -al %s/.hg .hg" % args[0])
    sys.exit(0)
elif cmd == "help":
    help()
    sys.exit(0)
else:
    try:
        repo = hg.repository(ui=ui)
    except:
        # bare except is deliberate: hg.repository raises an old-style
        # string exception ("No repo found") that Exception won't catch
        print "Unable to open repository"
        sys.exit(0)
112 113
# working-copy commands
if cmd == "checkout" or cmd == "co":
    # check out the tip, or an explicitly named changeset
    node = repo.changelog.tip()
    if args:
        node = repo.changelog.lookup(args[0])
    repo.checkout(node)

elif cmd == "add":
    repo.add(args)

elif cmd == "remove" or cmd == "rm" or cmd == "del" or cmd == "delete":
    repo.remove(args)

elif cmd == "commit" or cmd == "checkin" or cmd == "ci":
    if 1:
        # with file arguments, commit only those; otherwise everything
        if len(args) > 0:
            repo.commit(repo.current, args)
        else:
            repo.commit(repo.current)

elif cmd == "import" or cmd == "patch":
    # apply external patches with patch(1) and commit the touched files
    ioptions = {}
    opts = [('p', 'strip', 1, 'path strip'),
            ('b', 'base', "", 'base path')]

    args = fancyopts.fancyopts(args, opts, ioptions,
                               'hg import [options] <patch names>')
    d = ioptions["base"]
    strip = ioptions["strip"]

    for patch in args:
        ui.status("applying %s\n" % patch)
        pf = d + patch
        os.system("patch -p%d < %s > /dev/null" % (strip, pf))
        # lsdiff (patchutils) lists the files the patch touched
        f = os.popen("lsdiff --strip %d %s" % (strip, pf))
        files = f.read().splitlines()
        f.close()
        repo.commit(repo.current, files)
150 151
elif cmd == "status":
    # C = changed, ? = not tracked, R = missing/removed
    (c, a, d) = repo.diffdir(repo.root, repo.current)
    for f in c: print "C", f
    for f in a: print "?", f
    for f in d: print "R", f

elif cmd == "diff":
    revs = []

    if args:
        doptions = {}
        opts = [('r', 'revision', [], 'revision')]
        args = fancyopts.fancyopts(args, opts, doptions,
                                   'hg diff [options] [files]')
        revs = map(lambda x: repo.changelog.lookup(x), doptions['revision'])

    if len(revs) > 2:
        print "too many revisions to diff"
        sys.exit(1)
    else:
        # 0 revs: workdir vs current; 1: workdir vs rev; 2: rev vs rev
        diff(args, *revs)

elif cmd == "export":
    # diff one changeset against its first parent
    node = repo.changelog.lookup(args[0])
    prev = repo.changelog.parents(node)[0]
    diff(None, prev, node)

elif cmd == "debugchangegroup":
    # dump a binary changegroup of everything after the given rev
    newer = repo.newer(repo.changelog.lookup(args[0]))
    cg = repo.changegroup(newer)
    sys.stdout.write(cg)

elif cmd == "debugaddchangegroup":
    # read a binary changegroup from stdin and apply it
    data = sys.stdin.read()
    repo.addchangegroup(data)
187
elif cmd == "addremove":
    # schedule all untracked files for add and all missing ones for removal
    (c, a, d) = repo.diffdir(repo.root, repo.current)
    repo.add(a)
    repo.remove(d)

elif cmd == "history":
    # print every changeset, oldest first
    for i in range(repo.changelog.count()):
        n = repo.changelog.node(i)
        changes = repo.changelog.read(n)
        (p1, p2) = repo.changelog.parents(n)
        (h, h1, h2) = map(hg.hex, (n, p1, p2))
        (i1, i2) = map(repo.changelog.rev, (p1, p2))
        print "rev: %4d:%s" % (i, h)
        print "parents: %4d:%s" % (i1, h1)
        if i2: print " %4d:%s" % (i2, h2)
        print "manifest: %4d:%s" % (repo.manifest.rev(changes[0]),
                                    hg.hex(changes[0]))
        print "user:", changes[1]
        # date field is "unixtime timezone"; show the local time part
        print "date:", time.asctime(
            time.localtime(float(changes[2].split(' ')[0])))
        print "files:", " ".join(changes[3])
        print "description:"
        print changes[4]

elif cmd == "log":
    # per-file revision history, with the owning changeset of each rev
    if args:
        r = repo.file(args[0])
        for i in range(r.count()):
            n = r.node(i)
            (p1, p2) = r.parents(n)
            (h, h1, h2) = map(hg.hex, (n, p1, p2))
            (i1, i2) = map(r.rev, (p1, p2))
            cr = r.linkrev(n)
            cn = hg.hex(repo.changelog.node(cr))
            print "rev: %4d:%s" % (i, h)
            print "changeset: %4d:%s" % (cr, cn)
            print "parents: %4d:%s" % (i1, h1)
            if i2: print " %4d:%s" % (i2, h2)
    else:
        print "missing filename"
218 228
elif cmd == "dump":
    # write a file revision (default: tip) to stdout
    if args:
        r = repo.file(args[0])
        n = r.tip()
        if len(args) > 1: n = r.lookup(args[1])
        sys.stdout.write(r.read(n))
    else:
        print "missing filename"

elif cmd == "dumpmanifest":
    # list "hex-node filename" for every file in a manifest revision
    n = repo.manifest.tip()
    if len(args) > 0:
        n = repo.manifest.lookup(args[0])
    m = repo.manifest.read(n)
    files = m.keys()
    files.sort()

    for f in files:
        print hg.hex(m[f]), f

elif cmd == "debughash":
    # show the hashed store name a tracked path maps to
    f = repo.file(args[0])
    print f.encodepath(args[0])

elif cmd == "debugindex":
    # raw dump of a revlog index file
    r = hg.revlog(open, args[0], "")
    print " rev offset length base linkrev"+\
          " p1 p2 nodeid"
    for i in range(r.count()):
        e = r.index[i]
        print "% 6d % 9d % 7d % 5d % 7d %s.. %s.. %s.." % (
            i, e[0], e[1], e[2], e[3],
            hg.hex(e[4][:5]), hg.hex(e[5][:5]), hg.hex(e[6][:5]))

elif cmd == "merge":
    # pull and merge from another local repository
    if args:
        other = hg.repository(ui, args[0])
        repo.merge(other)
    else:
        print "missing source repository"
259 269
260 270 elif cmd == "verify":
261 271 filelinkrevs = {}
262 272 filenodes = {}
263 273 manifestchangeset = {}
264 274 changesets = revisions = files = 0
265 275
266 276 print "checking changesets"
267 277 for i in range(repo.changelog.count()):
268 278 changesets += 1
269 279 n = repo.changelog.node(i)
270 280 changes = repo.changelog.read(n)
271 281 manifestchangeset[changes[0]] = n
272 282 for f in changes[3]:
273 283 revisions += 1
274 284 filelinkrevs.setdefault(f, []).append(i)
275 285
276 286 print "checking manifests"
277 287 for i in range(repo.manifest.count()):
278 288 n = repo.manifest.node(i)
279 289 ca = repo.changelog.node(repo.manifest.linkrev(n))
280 290 cc = manifestchangeset[n]
281 291 if ca != cc:
282 292 print "manifest %s points to %s, not %s" % \
283 293 (hg.hex(n), hg.hex(ca), hg.hex(cc))
284 294 m = repo.manifest.read(n)
285 295 for f, fn in m.items():
286 296 filenodes.setdefault(f, {})[fn] = 1
287 297
288 298 print "crosschecking files in changesets and manifests"
289 299 for f in filenodes:
290 300 if f not in filelinkrevs:
291 301 print "file %s in manifest but not in changesets"
292 302
293 303 for f in filelinkrevs:
294 304 if f not in filenodes:
295 305 print "file %s in changeset but not in manifest"
296 306
297 307 print "checking files"
298 308 for f in filenodes:
299 309 files += 1
300 310 fl = repo.file(f)
301 311 nodes = {"\0"*20: 1}
302 312 for i in range(fl.count()):
303 313 n = fl.node(i)
304 314
305 315 if n not in filenodes[f]:
306 316 print "%s:%s not in manifests" % (f, hg.hex(n))
307 317 else:
308 318 del filenodes[f][n]
309 319
310 320 flr = fl.linkrev(n)
311 321 if flr not in filelinkrevs[f]:
312 322 print "%s:%s points to unexpected changeset rev %d" \
313 323 % (f, hg.hex(n), fl.linkrev(n))
314 324 else:
315 325 filelinkrevs[f].remove(flr)
316 326
317 327 # verify contents
318 328 t = fl.read(n)
319 329
320 330 # verify parents
321 331 (p1, p2) = fl.parents(n)
322 332 if p1 not in nodes:
323 333 print "%s:%s unknown parent 1 %s" % (f, hg.hex(n), hg.hex(p1))
324 334 if p2 not in nodes:
325 335 print "file %s:%s unknown parent %s" % (f, hg.hex(n), hg.hex(p1))
326 336 nodes[n] = 1
327 337
328 338 # cross-check
329 339 for flr in filelinkrevs[f]:
330 340 print "changeset rev %d not in %s" % (flr, f)
331 341
332 342 for node in filenodes[f]:
333 343 print "node %s in manifests not in %s" % (hg.hex(n), f)
334 344
335 345
336 346 print "%d files, %d changesets, %d total revisions" % (files, changesets,
337 347 revisions)
338 348
else:
    # fall-through for unrecognized commands
    print "unknown command\n"
    help()
    sys.exit(1)
@@ -1,613 +1,758 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, sha, socket, os, time, base64, re, urllib2
9 9 import urllib
10 10 from mercurial import byterange
11 11 from mercurial.transaction import *
12 12 from mercurial.revlog import *
13 13
class filelog(revlog):
    """A per-file revlog, stored under .hg/data with a hashed file name."""
    def __init__(self, opener, path):
        s = self.encodepath(path)
        revlog.__init__(self, opener, os.path.join("data", s + "i"),
                        os.path.join("data", s))

    def encodepath(self, path):
        """Map an arbitrary tracked path to a filesystem-safe store name
        (base64 of its sha1 digest with '+' and '/' replaced)."""
        s = sha.sha(path).digest()
        s = base64.encodestring(s)[:-3]
        s = re.sub("\+", "%", s)
        s = re.sub("/", "_", s)
        return s

    def read(self, node):
        return self.revision(node)
    def add(self, text, transaction, link, p1=None, p2=None):
        return self.addrevision(text, transaction, link, p1, p2)

    def resolvedag(self, old, new, transaction, link):
        """resolve unmerged heads in our DAG"""
        # identical heads: nothing to do; old an ancestor of new:
        # fast-forward; otherwise fall back to a real 3-way merge
        if old == new: return None
        a = self.ancestor(old, new)
        if old == a: return new
        return self.merge3(old, new, a, transaction, link)

    def merge3(self, my, other, base, transaction, link):
        """perform a 3-way merge and append the result"""
        def temp(prefix, node):
            # dump a revision into a temp file for the external merge tool
            # (tempfile comes in via `from mercurial.revlog import *`)
            (fd, name) = tempfile.mkstemp(prefix)
            f = os.fdopen(fd, "w")
            f.write(self.revision(node))
            f.close()
            return name

        a = temp("local", my)
        b = temp("remote", other)
        c = temp("parent", base)

        # HGMERGE names the user's 3-way merge command
        cmd = os.environ["HGMERGE"]
        r = os.system("%s %s %s %s" % (cmd, a, b, c))
        if r:
            raise "Merge failed, implement rollback!"

        t = open(a).read()
        os.unlink(a)
        os.unlink(b)
        os.unlink(c)
        return self.addrevision(t, transaction, link, my, other)

    def merge(self, other, transaction, linkseq, link):
        """perform a merge and resolve resulting heads"""
        (o, n) = self.mergedag(other, transaction, linkseq)
        return self.resolvedag(o, n, transaction, link)
67 67
class manifest(revlog):
    """The manifest revlog: each revision is the sorted list of
    "file\\0hex-node\\n" lines describing one project snapshot.  The last
    read and last added versions are cached to make repeated diffs cheap."""
    def __init__(self, opener):
        self.mapcache = None   # (node, {file: node}) of the last read()
        self.listcache = None  # (text, line list) of the last read()/add()
        self.addlist = None    # pending line list consumed by diff()
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        """Return {file: binary node} for manifest revision *node*."""
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        self.listcache = (text, text.splitlines(1))
        for l in self.listcache[1]:
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
        self.mapcache = (node, map)
        return map

    def diff(self, a, b):
        # this is sneaky, as we're not actually using a and b
        # (when the cached text matches, diff line lists instead)
        if self.listcache and len(self.listcache[0]) == len(a):
            return mdiff.diff(self.listcache[1], self.addlist, 1)
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, transaction, link, p1=None, p2=None):
        """Store {file: node} *map* as a new manifest revision."""
        files = map.keys()
        files.sort()

        self.addlist = ["%s\000%s\n" % (f, hex(map[f])) for f in files]
        text = "".join(self.addlist)

        n = self.addrevision(text, transaction, link, p1, p2)
        self.mapcache = (n, map)
        self.listcache = (text, self.addlist)

        return n
106 106
class changelog(revlog):
    """The changelog revlog: one revision per changeset, recording the
    manifest node, user, date, changed files and description."""
    def __init__(self, opener):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")

    def extract(self, text):
        """Parse changeset text into (manifest, user, date, files, desc)."""
        if not text:
            return (nullid, "", "0", [], "")
        # header and description are separated by a blank line
        last = text.index("\n\n")
        desc = text[last + 2:]
        l = text[:last].splitlines()
        manifest = bin(l[0])
        user = l[1]
        date = l[2]
        files = l[3:]
        return (manifest, user, date, files, desc)

    def read(self, node):
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None):
        """Create a new changeset.  The user comes from $HGUSER or the
        login name; the date is "unixtime timezone-offset"."""
        try: user = os.environ["HGUSER"]
        except: user = os.environ["LOGNAME"] + '@' + socket.getfqdn()
        date = "%d %d" % (time.time(), time.timezone)
        list.sort()
        l = [hex(manifest), user, date] + list + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, self.count(), p1, p2)

    def merge3(self, my, other, base):
        # changeset-level 3-way merge is not implemented yet
        pass
137 137
class dircache:
    """Cache of (mode, size, mtime) stat data for working-directory files,
    persisted in .hg/dircache, so status checks can skip content reads."""
    def __init__(self, opener, ui):
        self.opener = opener
        self.dirty = 0
        self.ui = ui
        self.map = None   # loaded lazily by read()
    def __del__(self):
        # flush unsaved entries when the cache object is dropped
        if self.dirty: self.write()
    def __getitem__(self, key):
        try:
            return self.map[key]
        except TypeError:
            # map is still None: load it, then retry the lookup
            self.read()
            return self[key]

    def read(self):
        """Load the on-disk cache; no-op if already loaded or missing."""
        if self.map is not None: return self.map

        self.map = {}
        try:
            st = self.opener("dircache").read()
        except IOError: return   # no cache file yet: start empty

        # each record is 4 big-endian longs (mode, size, mtime, namelen)
        # followed by the file name
        pos = 0
        while pos < len(st):
            e = struct.unpack(">llll", st[pos:pos+16])
            l = e[3]
            pos += 16
            f = st[pos:pos + l]
            self.map[f] = e[:3]
            pos += l

    def update(self, files):
        """Refresh cached stat data for *files*; entries for files that
        no longer exist are dropped."""
        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            try:
                s = os.stat(f)
                self.map[f] = (s.st_mode, s.st_size, s.st_mtime)
            except OSError:
                # fixes: os.stat raises OSError (the original caught
                # IOError and so never matched), and remove() expects a
                # list, not a bare string
                self.remove([f])

    def taint(self, files):
        """Mark *files* as dirty by giving them an impossible size, which
        forces a content comparison in repository.diffdir()."""
        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            self.map[f] = (0, -1, 0)

    def remove(self, files):
        """Forget the cached entries for *files*."""
        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            try:
                del self.map[f]
            except KeyError:
                self.ui.warn("Not in dircache: %s\n" % f)
                pass

    def clear(self):
        """Throw away all cached entries."""
        self.map = {}
        self.dirty = 1

    def write(self):
        """Serialize the cache back to .hg/dircache."""
        st = self.opener("dircache", "w")
        for f, e in self.map.items():
            e = struct.pack(">llll", e[0], e[1], e[2], len(f))
            st.write(e + f)
        self.dirty = 0

    def copy(self):
        self.read()
        return self.map.copy()
213 213
214 214 # used to avoid circular references so destructors work
def opener(base):
    """Return a file-opening function rooted at *base*.

    For http:// bases the function returns httprangereader objects; for
    local paths it breaks hard links before writing, so repositories
    branched with "cp -al" do not see each other's modifications.
    """
    p = base
    def o(path, mode="r"):
        if p[:7] == "http://":
            f = os.path.join(p, urllib.quote(path))
            return httprangereader(f)

        f = os.path.join(p, path)

        if mode != "r" and os.path.isfile(f):
            s = os.stat(f)
            if s.st_nlink > 1:
                # copy-on-write: replace the hard-linked file with a copy
                file(f + ".tmp", "w").write(file(f).read())
                os.rename(f+".tmp", f)

        return file(f, mode)

    return o
233 233
234 234 class repository:
    def __init__(self, ui, path=None, create=0):
        """Open (or, with create=1, initialize) a repository.

        path may be an http:// URL (read-only remote access), an explicit
        local path, or None to search upward from the cwd for .hg.
        """
        self.remote = 0
        if path and path[:7] == "http://":
            self.remote = 1
            self.path = path
        else:
            if not path:
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    p = os.path.dirname(p)
                    if p == "/": raise "No repo found"
                path = p
            self.path = os.path.join(path, ".hg")

        self.root = path
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.opener = opener(self.path)
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        self.ignorelist = None

        # the working-dir cache and checkout pointer only make sense locally
        if not self.remote:
            self.dircache = dircache(self.opener, ui)
            try:
                self.current = bin(self.opener("current").read())
            except IOError:
                self.current = None
267 267
    def setcurrent(self, node):
        """Record *node* as the checked-out changeset in .hg/current."""
        self.current = node
        self.opener("current", "w").write(hex(node))
271 271
    def ignore(self, f):
        """Return True if path *f* matches a pattern in .hgignore.
        Patterns (one regex per line) are compiled once and cached."""
        if self.ignorelist is None:
            self.ignorelist = []
            try:
                l = open(os.path.join(self.root, ".hgignore")).readlines()
                for pat in l:
                    if pat != "\n":
                        self.ignorelist.append(re.compile(pat[:-1]))
            except IOError: pass
        for pat in self.ignorelist:
            if pat.search(f): return True
        return False
284 284
    def join(self, f):
        """Return *f* as a path inside the .hg directory."""
        return os.path.join(self.path, f)
287 287
    def file(self, f):
        """Return the filelog for tracked file *f*."""
        return filelog(self.opener, f)
290 290
    def transaction(self):
        """Start a journaled transaction on this repository's store."""
        return transaction(self.opener, self.join("journal"))
293 293
    def merge(self, other):
        """Pull all new changesets from repository *other*, merge any
        divergent files and manifests, and commit a merge changeset when
        the histories actually diverged (non-fast-forward)."""
        tr = self.transaction()
        changed = {}
        new = {}
        seqrev = self.changelog.count()
        # some magic to allow fiddling in nested scope
        nextrev = [seqrev]

        # helpers for back-linking file revisions to local changeset
        # revisions so we can immediately get to changeset from annotate
        def accumulate(text):
            # track which files are added in which changeset and the
            # corresponding _local_ changeset revision
            files = self.changelog.extract(text)[3]
            for f in files:
                changed.setdefault(f, []).append(nextrev[0])
            nextrev[0] += 1

        def seq(start):
            # endless linkrev generator starting at *start*
            while 1:
                yield start
                start += 1

        def lseq(l):
            # linkrev generator over a fixed list
            for r in l:
                yield r

        # begin the import/merge of changesets
        self.ui.status("merging new changesets\n")
        (co, cn) = self.changelog.mergedag(other.changelog, tr,
                                           seq(seqrev), accumulate)
        resolverev = self.changelog.count()

        # is there anything to do?
        if co == cn:
            tr.close()
            return

        # do we need to resolve?
        simple = (co == self.changelog.ancestor(co, cn))

        # merge all files changed by the changesets,
        # keeping track of the new tips
        changelist = changed.keys()
        changelist.sort()
        for f in changelist:
            sys.stdout.write(".")
            sys.stdout.flush()
            r = self.file(f)
            node = r.merge(other.file(f), tr, lseq(changed[f]), resolverev)
            if node:
                new[f] = node
        sys.stdout.write("\n")

        # begin the merge of the manifest
        self.ui.status("merging manifests\n")
        (mm, mo) = self.manifest.mergedag(other.manifest, tr, seq(seqrev))

        # For simple merges, we don't need to resolve manifests or changesets
        if simple:
            tr.close()
            return

        ma = self.manifest.ancestor(mm, mo)

        # resolve the manifest to point to all the merged files
        self.ui.status("resolving manifests\n")
        mmap = self.manifest.read(mm) # mine
        omap = self.manifest.read(mo) # other
        amap = self.manifest.read(ma) # ancestor
        nmap = {}

        for f, mid in mmap.iteritems():
            if f in omap:
                if mid != omap[f]:
                    nmap[f] = new.get(f, mid) # use merged version
                else:
                    nmap[f] = new.get(f, mid) # they're the same
                del omap[f]
            elif f in amap:
                if mid != amap[f]:
                    pass # we should prompt here
                else:
                    pass # other deleted it
            else:
                nmap[f] = new.get(f, mid) # we created it

        del mmap

        for f, oid in omap.iteritems():
            if f in amap:
                if oid != amap[f]:
                    pass # this is the nasty case, we should prompt
                else:
                    pass # probably safe
            else:
                nmap[f] = new.get(f, oid) # remote created it

        del omap
        del amap

        node = self.manifest.add(nmap, tr, resolverev, mm, mo)

        # Now all files and manifests are merged, we add the changed files
        # and manifest id to the changelog
        self.ui.status("committing merge changeset\n")
        new = new.keys()
        new.sort()
        if co == cn: cn = -1

        edittext = "\n"+"".join(["HG: changed %s\n" % f for f in new])
        edittext = self.ui.edit(edittext)
        n = self.changelog.add(node, new, edittext, tr, co, cn)

        tr.close()
409 409
    def commit(self, parent, update = None, text = ""):
        """Commit working-directory changes as a child of *parent*.

        update: files to check in (default: everything diffdir reports
        changed).  Files queued in .hg/to-remove are dropped from the
        manifest.  Runs the user's editor for the changeset message.
        """
        tr = self.transaction()

        try:
            remove = [ l[:-1] for l in self.opener("to-remove") ]
            os.unlink(self.join("to-remove"))

        except IOError:
            remove = []

        if update == None:
            update = self.diffdir(self.root, parent)[0]

        # check in files
        new = {}
        linkrev = self.changelog.count()
        for f in update:
            try:
                t = file(f).read()
            except IOError:
                # vanished since diffdir ran; treat it as a removal
                remove.append(f)
                continue
            r = self.file(f)
            new[f] = r.add(t, tr, linkrev)

        # update manifest
        mmap = self.manifest.read(self.manifest.tip())
        mmap.update(new)
        for f in remove:
            del mmap[f]
        mnode = self.manifest.add(mmap, tr, linkrev)

        # add changeset
        new = new.keys()
        new.sort()

        edittext = text + "\n"+"".join(["HG: changed %s\n" % f for f in new])
        edittext += "".join(["HG: removed %s\n" % f for f in remove])
        edittext = self.ui.edit(edittext)

        n = self.changelog.add(mnode, new, edittext, tr)
        tr.close()

        # point the working directory at the new changeset and refresh
        # the stat cache
        self.setcurrent(n)
        self.dircache.update(new)
        self.dircache.remove(remove)
456 456
    def checkdir(self, path):
        """Ensure the parent directories of *path* exist (recursive
        mkdir -p on its dirname)."""
        d = os.path.dirname(path)
        if not d: return
        if not os.path.isdir(d):
            self.checkdir(d)
            os.mkdir(d)
463 463
    def checkout(self, node):
        """Overwrite the working directory with the contents of changeset
        *node* and reset the dircache to match."""
        # checkout is really dumb at the moment
        # it ought to basically merge
        change = self.changelog.read(node)
        mmap = self.manifest.read(change[0])

        l = mmap.keys()
        l.sort()
        stats = []
        for f in l:
            r = self.file(f)
            t = r.revision(mmap[f])
            try:
                file(f, "w").write(t)
            except:
                # missing intermediate directories: create them and retry
                self.checkdir(f)
                file(f, "w").write(t)

        self.setcurrent(node)
        self.dircache.clear()
        self.dircache.update(l)
485 485
    def diffdir(self, path, changeset):
        """Compare the working directory against *changeset*.

        Returns (changed, added, deleted) file-name lists.  When the
        comparison target is the currently checked-out changeset, cached
        stat data lets most content reads be skipped.
        """
        changed = []
        mf = {}
        added = []

        if changeset:
            change = self.changelog.read(changeset)
            mf = self.manifest.read(change[0])

        if changeset == self.current:
            dc = self.dircache.copy()
        else:
            # no usable stat cache: force content comparison for all files
            dc = dict.fromkeys(mf)

        def fcmp(fn):
            # compare working-file contents with the manifest revision
            t1 = file(fn).read()
            t2 = self.file(fn).revision(mf[fn])
            return cmp(t1, t2)

        for dir, subdirs, files in os.walk(self.root):
            d = dir[len(self.root)+1:]
            if ".hg" in subdirs: subdirs.remove(".hg")

            for f in files:
                fn = os.path.join(d, f)
                try: s = os.stat(fn)
                except: continue
                if fn in dc:
                    c = dc[fn]
                    del dc[fn]
                    if not c:
                        # no cached stat info: fall back to content compare
                        if fcmp(fn):
                            changed.append(fn)
                    elif c[1] != s.st_size:
                        changed.append(fn)
                    elif c[0] != s.st_mode or c[2] != s.st_mtime:
                        if fcmp(fn):
                            changed.append(fn)
                else:
                    if self.ignore(fn): continue
                    added.append(fn)

        # anything left in dc was tracked but not found on disk
        deleted = dc.keys()
        deleted.sort()

        return (changed, added, deleted)
532 532
    def diffrevs(self, node1, node2):
        """Compare two changesets by manifest only; returns
        (changed, added, deleted) going from node1 to node2."""
        changed, added = [], []

        change = self.changelog.read(node1)
        mf1 = self.manifest.read(change[0])
        change = self.changelog.read(node2)
        mf2 = self.manifest.read(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    changed.append(fn)
                del mf1[fn]
            else:
                added.append(fn)

        # whatever remains in mf1 exists only on the node1 side
        deleted = mf1.keys()
        deleted.sort()

        return (changed, added, deleted)
553 553
    def add(self, list):
        """Schedule *list* of files for the next commit by tainting
        their dircache entries (forces them to show as changed)."""
        self.dircache.taint(list)
556 556
    def remove(self, list):
        """Queue *list* of files in .hg/to-remove; commit() deletes them
        from the manifest and clears the queue file."""
        dl = self.opener("to-remove", "a")
        for f in list:
            dl.write(f + "\n")
561 561
    def newer(self, node):
        """Return the changelog nodes of every revision after *node*."""
        nodes = []
        for i in xrange(self.changelog.rev(node) + 1, self.changelog.count()):
            nodes.append(self.changelog.node(i))

        return nodes
568
    def changegroup(self, nodes):
        """Serialize the changesets *nodes* plus every manifest and file
        revision they touch into one compressed blob, consumable by
        addchangegroup()."""
        # construct the link map
        linkmap = {}
        for n in nodes:
            linkmap[self.changelog.rev(n)] = n

        # construct a list of all changed files
        changed = {}
        for n in nodes:
            c = self.changelog.read(n)
            for f in c[3]:
                changed[f] = 1
        changed = changed.keys()
        changed.sort()

        # the changegroup is changesets + manifests + all file revs
        cg = []
        revs = [ self.changelog.rev(n) for n in nodes ]

        g = self.changelog.group(linkmap)
        cg.append(g)
        g = self.manifest.group(linkmap)
        cg.append(g)

        for f in changed:
            g = self.file(f).group(linkmap)
            if not g: raise "couldn't find change to %s" % f
            # each file group is preceded by its name length and name
            l = struct.pack(">l", len(f))
            cg += [l, f, g]

        return compress("".join(cg))
600
601 def addchangegroup(self, data):
602 data = decompress(data)
603 def getlen(data, pos):
604 return struct.unpack(">l", data[pos:pos + 4])[0]
605
606 tr = self.transaction()
607 simple = True
608
609 print "merging changesets"
610 # pull off the changeset group
611 l = getlen(data, 0)
612 csg = data[0:l]
613 pos = l
614 co = self.changelog.tip()
615 cn = self.changelog.addgroup(csg, lambda x: self.changelog.count(), tr)
616
617 print "merging manifests"
618 # pull off the manifest group
619 l = getlen(data, pos)
620 mfg = data[pos: pos + l]
621 pos += l
622 mo = self.manifest.tip()
623 mn = self.manifest.addgroup(mfg, lambda x: self.changelog.rev(x), tr)
624
625 # do we need a resolve?
626 if self.changelog.ancestor(co, cn) != co:
627 print "NEED RESOLVE"
628 simple = False
629 resolverev = self.changelog.count()
630
631 # process the files
632 print "merging files"
633 new = {}
634 while pos < len(data):
635 l = getlen(data, pos)
636 pos += 4
637 f = data[pos:pos + l]
638 pos += l
639
640 l = getlen(data, pos)
641 fg = data[pos: pos + l]
642 pos += l
643
644 fl = self.file(f)
645 o = fl.tip()
646 n = fl.addgroup(fg, lambda x: self.changelog.rev(x), tr)
647 if not simple:
648 new[fl] = fl.resolvedag(o, n, tr, resolverev)
649
650 # For simple merges, we don't need to resolve manifests or changesets
651 if simple:
652 tr.close()
653 return
654
655 # resolve the manifest to point to all the merged files
656 self.ui.status("resolving manifests\n")
657 ma = self.manifest.ancestor(mm, mo)
658 mmap = self.manifest.read(mm) # mine
659 omap = self.manifest.read(mo) # other
660 amap = self.manifest.read(ma) # ancestor
661 nmap = {}
662
663 for f, mid in mmap.iteritems():
664 if f in omap:
665 if mid != omap[f]:
666 nmap[f] = new.get(f, mid) # use merged version
667 else:
668 nmap[f] = new.get(f, mid) # they're the same
669 del omap[f]
670 elif f in amap:
671 if mid != amap[f]:
672 pass # we should prompt here
673 else:
674 pass # other deleted it
675 else:
676 nmap[f] = new.get(f, mid) # we created it
677
678 del mmap
679
680 for f, oid in omap.iteritems():
681 if f in amap:
682 if oid != amap[f]:
683 pass # this is the nasty case, we should prompt
684 else:
685 pass # probably safe
686 else:
687 nmap[f] = new.get(f, oid) # remote created it
688
689 del omap
690 del amap
691
692 node = self.manifest.add(nmap, tr, resolverev, mm, mo)
693
694 # Now all files and manifests are merged, we add the changed files
695 # and manifest id to the changelog
696 self.ui.status("committing merge changeset\n")
697 new = new.keys()
698 new.sort()
699 if co == cn: cn = -1
700
701 edittext = "\n"+"".join(["HG: changed %s\n" % f for f in new])
702 edittext = self.ui.edit(edittext)
703 n = self.changelog.add(node, new, edittext, tr, co, cn)
704
705 tr.close()
706
class ui:
    """Minimal user-interface helper: leveled output, prompting, and
    running $EDITOR for commit/merge messages."""
    def __init__(self, verbose=False, debug=False):
        self.verbose = verbose
        # fix: the original ignored the debug argument entirely, and
        # debug() then tested self.debug -- the bound method, which is
        # always true.  Store the flag under a non-clashing name.
        self.debugflag = debug
    def write(self, *args):
        for a in args:
            sys.stdout.write(str(a))
    def prompt(self, msg, pat):
        """Ask until the reply matches regex *pat*; return the reply."""
        while 1:
            sys.stdout.write(msg)
            r = sys.stdin.readline()[:-1]
            if re.match(pat, r):
                return r
    def status(self, *msg):
        self.write(*msg)
    def warn(self, msg):
        # fix: pass the message through whole; the old self.write(*msg)
        # unpacked the string and wrote it character by character
        self.write(msg)
    def note(self, msg):
        if self.verbose: self.write(msg)
    def debug(self, msg):
        if self.debugflag: self.write(msg)
    def edit(self, text):
        """Run $EDITOR (default vi) on *text*; return the result with
        HG: helper lines stripped."""
        # tempfile comes in via `from mercurial.revlog import *`
        (fd, name) = tempfile.mkstemp("hg")
        f = os.fdopen(fd, "w")
        f.write(text)
        f.close()

        editor = os.environ.get("EDITOR", "vi")
        r = os.system("%s %s" % (editor, name))
        if r:
            raise "Edit failed!"

        t = open(name).read()
        t = re.sub("(?m)^HG:.*\n", "", t)

        return t
597 742
598 743
class httprangereader:
    """File-like reader that fetches a remote URL with HTTP Range
    requests, so revlog data can be pulled piecemeal from a plain
    static web server."""
    def __init__(self, url):
        self.url = url
        self.pos = 0
    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        # install a Range-capable handler (mercurial.byterange)
        opener = urllib2.build_opener(byterange.HTTPRangeHandler())
        urllib2.install_opener(opener)
        req = urllib2.Request(self.url)
        end = ''
        if bytes: end = self.pos + bytes
        req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
        f = urllib2.urlopen(req)
        # NOTE(review): read() never advances self.pos; callers seem to
        # seek() explicitly before each read -- confirm before relying
        # on sequential reads
        return f.read()
@@ -1,229 +1,412 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # This provides efficient delta storage with O(1) retrieve and append
4 4 # and O(changes) merge between branches
5 5 #
6 6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
7 7 #
8 8 # This software may be used and distributed according to the terms
9 9 # of the GNU General Public License, incorporated herein by reference.
10 10
import binascii
import hashlib
import os
import struct
import tempfile
import zlib

import sha  # legacy module kept for compatibility; new code uses hashlib

from mercurial import mdiff
13 13
def hex(node):
    """Render a binary node id as its ASCII hex representation."""
    return binascii.b2a_hex(node)
def bin(node):
    """Decode an ASCII hex string back into a binary node id."""
    return binascii.a2b_hex(node)
16 16
def compress(text):
    """Deflate *text* with zlib at the default compression level."""
    z = zlib.compressobj()
    return z.compress(text) + z.flush()
19 19
def decompress(bin):
    """Inflate a zlib-compressed chunk back to the original text."""
    d = zlib.decompressobj()
    return d.decompress(bin) + d.flush()
22 22
def hash(text, p1, p2):
    """Return the SHA-1 node id for *text* with parents *p1* and *p2*.

    The parents are sorted first so the id is independent of parent
    order; the digest covers both parent ids plus the revision text.
    Uses hashlib instead of the deprecated (and later removed) ``sha``
    module; the digests are identical.
    """
    l = [p1, p2]
    l.sort()
    return hashlib.sha1(l[0] + l[1] + text).digest()
27 27
# the id of the null (absent) revision: 20 zero bytes, SHA-1 digest size
nullid = "\0" * 20
# index entry layout: offset, size, base rev, link rev (four big-endian
# longs) followed by p1, p2 and this revision's node id (three 20-byte
# binary hashes)
indexformat = ">4l20s20s20s"
30 30
class revlog:
    """Append-only delta storage for one revision history.

    A revlog is a pair of files: an index of fixed-size entries (see
    ``indexformat``) and a data file of zlib-compressed full texts and
    deltas.  Each index entry records (offset, size, base, linkrev,
    p1, p2, node): where the revision's compressed data lives, the rev
    its delta chain starts from, the changelog rev that introduced it,
    its two parent node ids, and its own node id.
    """
    def __init__(self, opener, indexfile, datafile):
        # opener: callable(path[, mode]) -> file object, so the same
        # code can read local files or remote ones (e.g. over HTTP)
        self.indexfile = indexfile
        self.datafile = datafile
        self.index = []                  # index tuples, by rev number
        self.opener = opener
        self.cache = None                # (node, rev, text) of last revision()
        self.nodemap = {nullid: -1}      # node id -> rev number
        # read the whole index for now, handle on-demand later
        try:
            n = 0
            i = self.opener(self.indexfile).read()
            s = struct.calcsize(indexformat)
            for f in range(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                self.nodemap[e[6]] = n
                self.index.append(e)
                n += 1
        except IOError: pass             # no index file yet: empty revlog

    # small accessors over the index
    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node): return self.nodemap[node]
    def linkrev(self, node): return self.index[self.nodemap[node]][3]
    def parents(self, node):
        # the null revision has no parents
        if node == nullid: return (nullid, nullid)
        return self.index[self.nodemap[node]][4:6]

    def start(self, rev): return self.index[rev][0]      # data file offset
    def length(self, rev): return self.index[rev][1]     # compressed size
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]       # head of delta chain

    def lookup(self, id):
        """Resolve *id* (a rev number or a unique hex-id substring) to
        a node.  Raises KeyError when ambiguous or unknown."""
        try:
            rev = int(id)
            return self.node(rev)
        except ValueError:
            c = []
            for n in self.nodemap:
                # NOTE: matches the substring anywhere in the hex id,
                # not just as a prefix
                if id in hex(n):
                    c.append(n)
            if len(c) > 1: raise KeyError("Ambiguous identifier")
            if len(c) < 1: raise KeyError
            return c[0]

        return None  # unreachable: both paths above return or raise

    def revisions(self, list):
        """Yield the full text of each node in *list*, in order."""
        # this can be optimized to do spans, etc
        # be stupid for now
        for node in list:
            yield self.revision(node)

    def diff(self, a, b):
        """Return a binary delta transforming text *a* into text *b*."""
        return mdiff.textdiff(a, b)

    def patch(self, text, patch):
        """Apply binary delta *patch* to *text*."""
        return mdiff.patch(text, patch)

    def revision(self, node):
        """Reconstruct and return the full text of *node*.

        Reads the compressed delta chain from the data file in one
        span, applies the deltas on top of the base (full) version,
        verifies the result against the node hash, and caches the text
        to speed up chained reconstructions.
        """
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        text = None
        rev = self.rev(node)
        base = self.base(rev)
        start = self.start(base)
        end = self.end(rev)

        # if the cached revision lies inside our delta chain, start
        # patching from it instead of from the full base version
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if not text:
            # no usable cache: decompress the full base version
            last = self.length(base)
            text = decompress(data[:last])

        # apply each delta in the chain in sequence
        for r in range(base + 1, rev + 1):
            s = self.length(r)
            b = decompress(data[last:last + s])
            text = self.patch(text, b)
            last = last + s

        (p1, p2) = self.parents(node)
        if node != hash(text, p1, p2):
            # NOTE(review): string exception — legal only in early
            # Python 2; a real exception class would be preferable
            raise "integrity check failed on %s:%d" % (self.datafile, rev)

        self.cache = (node, rev, text)
        return text

    def addrevision(self, text, transaction, link, p1=None, p2=None):
        """Append a revision with content *text*; return its node id.

        *link* is the changelog rev this revision belongs to; parents
        default to (tip, nullid).  Writes are registered with
        *transaction* so they can be rolled back on failure.
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        n = self.count()
        t = n - 1  # current tip rev, -1 when the log is empty

        if n:
            start = self.start(self.base(t))
            end = self.end(t)
            prev = self.revision(self.tip())
            data = compress(self.diff(prev, text))

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or (end + len(data) - start) > len(text) * 2:
            data = compress(text)
            base = n           # this rev starts a new delta chain
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        e = (offset, len(data), base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        # register the pre-write sizes so a rollback can truncate
        transaction.add(self.datafile, e[0])
        self.opener(self.datafile, "a").write(data)
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node

    def ancestor(self, a, b):
        """Return a common ancestor node of *a* and *b* (nullid if none)."""
        def expand(list, map):
            # breadth-first walk towards the roots, recording visited
            # nodes in *map*; yields nullid once exhausted
            a = []
            while list:
                n = list.pop(0)
                map[n] = 1
                yield n
                for p in self.parents(n):
                    if p != nullid and p not in map:
                        list.append(p)
            yield nullid

        # grow both ancestor sets in lockstep until they intersect
        amap = {}
        bmap = {}
        ag = expand([a], amap)
        bg = expand([b], bmap)
        adone = bdone = 0

        while not adone or not bdone:
            if not adone:
                an = ag.next()
                if an == nullid:
                    adone = 1
                elif an in bmap:
                    return an
            if not bdone:
                bn = bg.next()
                if bn == nullid:
                    bdone = 1
                elif bn in amap:
                    return bn

        return nullid

    def mergedag(self, other, transaction, linkseq, accumulate = None):
        """combine the nodes from other's DAG into ours"""
        old = self.tip()
        i = self.count()
        l = []

        # merge the other revision log into our DAG
        for r in range(other.count()):
            id = other.node(r)
            if id not in self.nodemap:
                (xn, yn) = other.parents(id)
                l.append((id, xn, yn))
                self.nodemap[id] = i
                i += 1

        # merge node date for new nodes
        r = other.revisions([e[0] for e in l])
        for e in l:
            t = r.next()
            if accumulate: accumulate(t)
            self.addrevision(t, transaction, linkseq.next(), e[1], e[2])

        # return the unmerged heads for later resolving
        return (old, self.tip())

    def group(self, linkmap):
        """Serialize the revisions whose linkrev is in *linkmap* as a
        changegroup: length-prefixed chunks of (node, p1, p2, cs-id,
        delta), themselves prefixed by a total length."""
        # given a list of changeset revs, return a set of deltas and
        # metadata corresponding to nodes the first delta is
        # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        # have this parent as it has all history before these
        # changesets. parent is parent[0]

        revs = []
        needed = {}

        # find file nodes/revs that match changeset revs
        for i in xrange(0, self.count()):
            if self.index[i][3] in linkmap:
                revs.append(i)
                needed[i] = 1

        # if we don't have any revisions touched by these changesets, bail
        if not revs: return struct.pack(">l", 0)

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # for each delta that isn't contiguous in the log, we need to
        # reconstruct the base, reconstruct the result, and then
        # calculate the delta. We also need to do this where we've
        # stored a full version and not a delta
        for i in xrange(0, len(revs) - 1):
            a, b = revs[i], revs[i + 1]
            if a + 1 != b or self.base(b) == b:
                for j in xrange(self.base(a), a + 1):
                    needed[j] = 1
                for j in xrange(self.base(b), b + 1):
                    needed[j] = 1

        # calculate spans to retrieve from datafile
        needed = needed.keys()
        needed.sort()
        spans = []
        for n in needed:
            if n < 0: continue       # skip the null revision
            o = self.start(n)
            l = self.length(n)
            spans.append((o, l, [(n, l)]))

        # merge adjacent spans into one read
        merge = [spans.pop(0)]
        while spans:
            e = spans.pop(0)
            f = merge[-1]
            if e[0] == f[0] + f[1]:
                merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
            else:
                merge.append(e)

        # read spans in, divide up chunks
        chunks = {}
        for span in merge:
            # we reopen the file for each span to make http happy for now
            f = self.opener(self.datafile)
            f.seek(span[0])
            data = f.read(span[1])

            # divide up the span
            pos = 0
            for r, l in span[2]:
                chunks[r] = data[pos: pos + l]
                pos += l

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            for r in range(base + 1, rev + 1):
                b = decompress(chunks[r])
                text = self.patch(text, b)
            return text

        # build deltas
        deltas = []
        for d in range(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            n = self.node(b)

            if a + 1 != b or self.base(b) == b:
                # non-contiguous (or full version stored): rebuild both
                # texts and compute a fresh delta between them
                if a >= 0:
                    base = self.base(a)
                    ta = decompress(chunks[self.base(a)])
                    ta = construct(ta, base, a)
                else:
                    ta = ""

                base = self.base(b)
                if a > base:
                    # a lies on b's chain: patch forward from ta
                    base = a
                    tb = ta
                else:
                    tb = decompress(chunks[self.base(b)])
                tb = construct(tb, base, b)
                d = self.diff(ta, tb)
            else:
                # contiguous delta: reuse the stored one as-is
                d = decompress(chunks[b])

            # chunk = 4-byte length + node + p1 + p2 + changeset id + delta
            p = self.parents(n)
            meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
            l = struct.pack(">l", len(meta) + len(d) + 4)
            deltas.append(l + meta + d)

        # prepend the total group length (including this prefix)
        l = struct.pack(">l", sum(map(len, deltas)) + 4)
        deltas.insert(0, l)
        return "".join(deltas)

    def addgroup(self, data, linkmapper, transaction):
        """Apply a changegroup produced by group(); return the last
        node added (None-equivalent early return on an empty group).

        *linkmapper* translates each chunk's changeset id into a local
        linkrev.
        """
        # given a set of deltas, add them to the revision log. the
        # first delta is against its parent, which should be in our
        # log, the rest are against the previous delta.

        if len(data) <= 4: return   # just the length prefix: nothing to add

        # retrieve the parent revision of the delta chain
        # (bytes 28:48 are the p1 field of the first chunk)
        chain = data[28:48]
        text = self.revision(chain)

        # track the base of the current delta log
        r = self.count()
        t = r - 1

        base = prev = -1
        start = end = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        # register pre-write sizes for rollback
        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        pos = 4
        while pos < len(data):
            # chunk header: length + node + p1 + p2 + changeset id
            l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
                                                data[pos:pos+84])
            link = linkmapper(cs)
            delta = data[pos + 84:pos + l]
            pos += l

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(self.node(t))
                text = self.patch(text, delta)
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    # NOTE(review): string exception — py2-only idiom
                    raise "consistency error adding group"
                measure = len(text)
            else:
                # append the received delta directly onto the chain
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            # advance to the node just added; subsequent deltas apply
            # to the previous chunk, i.e. our new tip
            t, r = r, r + 1
            chain = prev
            # NOTE(review): chain/prev are both left at the pre-loop tip
            # here, so chain == prev holds on later iterations, which is
            # what the size test above relies on — confirm intent
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node
General Comments 0
You need to be logged in to leave comments. Login now