Beginning of multi-head support...
mpm@selenic.com - r221:2bfe525e default
@@ -1,219 +1,240 @@
-import os, re, traceback, sys, signal
+import os, re, traceback, sys, signal, time
 from mercurial import fancyopts, ui, hg
 
 class UnknownCommand(Exception): pass
 
 def filterfiles(list, files):
     l = [ x for x in list if x in files ]
 
     for f in files:
         if f[-1] != os.sep: f += os.sep
         l += [ x for x in list if x.startswith(f) ]
     return l
 
 def relfilter(repo, args):
     if os.getcwd() != repo.root:
         p = os.getcwd()[len(repo.root) + 1: ]
         return filterfiles(p, args)
     return args
 
 def relpath(repo, args):
     if os.getcwd() != repo.root:
         p = os.getcwd()[len(repo.root) + 1: ]
         return [ os.path.join(p, x) for x in args ]
     return args
 
 def help(ui, cmd=None):
     '''show help'''
     if cmd:
         try:
             i = find(cmd)
             ui.write("%s\n\n" % i[2])
             ui.write(i[0].__doc__, "\n")
         except UnknownCommand:
             ui.warn("unknown command %s", cmd)
         sys.exit(0)
 
     ui.status("""\
 hg commands:
 
  add [files...]        add the given files in the next commit
  addremove             add all new files, delete all missing files
  annotate [files...]   show changeset number per file line
  branch <path>         create a branch of <path> in this directory
  checkout [changeset]  checkout the latest or given changeset
  commit                commit all changes to the repository
  diff [files...]       diff working directory (or selected files)
  dump <file> [rev]     dump the latest or given revision of a file
  dumpmanifest [rev]    dump the latest or given revision of the manifest
  export <rev>          dump the changeset header and diffs for a revision
  history               show changeset history
  init                  create a new repository in this directory
  log <file>            show revision history of a single file
  merge <path>          merge changes from <path> into local repository
  recover               rollback an interrupted transaction
  remove [files...]     remove the given files in the next commit
  serve                 export the repository via HTTP
  status                show new, missing, and changed files in working dir
  tags                  show current changeset tags
  undo                  undo the last transaction
 """)
 
 def init(ui):
     """create a repository"""
     hg.repository(ui, ".", create=1)
 
 def branch(ui, path):
     '''branch from a local repository'''
     # this should eventually support remote repos
     os.system("cp -al %s/.hg .hg" % path)
 
 def checkout(ui, repo, changeset=None):
     '''checkout a given changeset or the current tip'''
     (c, a, d, u) = repo.diffdir(repo.root, repo.current)
     if c or a or d:
         ui.warn("aborting (outstanding changes in working directory)\n")
         sys.exit(1)
 
     node = repo.changelog.tip()
     if changeset:
         node = repo.lookup(changeset)
     repo.checkout(node)
 
 def annotate(u, repo, *args, **ops):
     def getnode(rev):
         return hg.short(repo.changelog.node(rev))
 
     def getname(rev):
         try:
             return bcache[rev]
         except KeyError:
             cl = repo.changelog.read(repo.changelog.node(rev))
             name = cl[1]
             f = name.find('@')
             if f >= 0:
                 name = name[:f]
             bcache[rev] = name
             return name
 
     bcache = {}
     opmap = [['user', getname], ['number', str], ['changeset', getnode]]
     if not ops['user'] and not ops['changeset']:
         ops['number'] = 1
 
     args = relpath(repo, args)
     node = repo.current
     if ops['revision']:
         node = repo.changelog.lookup(ops['revision'])
     change = repo.changelog.read(node)
     mmap = repo.manifest.read(change[0])
     maxuserlen = 0
     maxchangelen = 0
     for f in args:
         lines = repo.file(f).annotate(mmap[f])
         pieces = []
 
         for o, f in opmap:
             if ops[o]:
                 l = [ f(n) for n,t in lines ]
                 m = max(map(len, l))
                 pieces.append([ "%*s" % (m, x) for x in l])
 
         for p,l in zip(zip(*pieces), lines):
             u.write(" ".join(p) + ": " + l[1])
 
+def heads(ui, repo):
+    '''show current repository heads'''
+    for n in repo.changelog.heads():
+        i = repo.changelog.rev(n)
+        changes = repo.changelog.read(n)
+        (p1, p2) = repo.changelog.parents(n)
+        (h, h1, h2) = map(hg.hex, (n, p1, p2))
+        (i1, i2) = map(repo.changelog.rev, (p1, p2))
+        print "rev:      %4d:%s" % (i, h)
+        print "parents:  %4d:%s" % (i1, h1)
+        if i2: print "          %4d:%s" % (i2, h2)
+        print "manifest: %4d:%s" % (repo.manifest.rev(changes[0]),
+                                    hg.hex(changes[0]))
+        print "user:", changes[1]
+        print "date:", time.asctime(
+            time.localtime(float(changes[2].split(' ')[0])))
+        if ui.verbose: print "files:", " ".join(changes[3])
+        print "description:"
+        print changes[4]
+
 def status(ui, repo):
     '''show changed files in the working directory
 
     C = changed
     A = added
     R = removed
     ? = not tracked'''
     (c, a, d, u) = repo.diffdir(repo.root, repo.current)
     (c, a, d, u) = map(lambda x: relfilter(repo, x), (c, a, d, u))
 
     for f in c: print "C", f
     for f in a: print "A", f
     for f in d: print "R", f
     for f in u: print "?", f
 
 def undo(ui, repo):
     repo.undo()
 
 table = {
     "init": (init, [], 'hg init'),
     "branch|clone": (branch, [], 'hg branch [path]'),
+    "heads": (heads, [], 'hg heads'),
     "help": (help, [], 'hg help [command]'),
     "checkout|co": (checkout, [], 'hg checkout [changeset]'),
     "ann|annotate": (annotate,
                      [('r', 'revision', '', 'revision'),
                       ('u', 'user', None, 'show user'),
                       ('n', 'number', None, 'show revision number'),
                       ('c', 'changeset', None, 'show changeset')],
                      'hg annotate [-u] [-c] [-n] [-r id] [files]'),
     "status": (status, [], 'hg status'),
     "undo": (undo, [], 'hg undo'),
     }
 
 norepo = "init branch help"
 
 def find(cmd):
     i = None
     for e in table.keys():
         if re.match(e + "$", cmd):
             return table[e]
 
     raise UnknownCommand(cmd)
 
 class SignalInterrupt(Exception): pass
 
 def catchterm(*args):
     raise SignalInterrupt
 
 def dispatch(args):
     options = {}
     opts = [('v', 'verbose', None, 'verbose'),
             ('d', 'debug', None, 'debug'),
             ('q', 'quiet', None, 'quiet'),
             ('y', 'noninteractive', None, 'run non-interactively'),
             ]
 
     args = fancyopts.fancyopts(args, opts, options,
                                'hg [options] <command> [options] [files]')
 
     if not args:
         cmd = "help"
     else:
         cmd, args = args[0], args[1:]
 
     u = ui.ui(options["verbose"], options["debug"], options["quiet"],
               not options["noninteractive"])
 
     # deal with unfound commands later
     i = find(cmd)
 
     signal.signal(signal.SIGTERM, catchterm)
 
     cmdoptions = {}
     args = fancyopts.fancyopts(args, i[1], cmdoptions, i[2])
 
     if cmd not in norepo.split():
         repo = hg.repository(ui = u)
         d = lambda: i[0](u, repo, *args, **cmdoptions)
     else:
         d = lambda: i[0](u, *args, **cmdoptions)
 
     try:
         d()
     except SignalInterrupt:
         u.warn("killed!\n")
     except KeyboardInterrupt:
         u.warn("interrupted!\n")
     except TypeError, inst:
         # was this an argument error?
         tb = traceback.extract_tb(sys.exc_info()[2])
         if len(tb) > 2: # no
             raise
         u.warn("%s: invalid arguments\n" % i[0].__name__)
         u.warn("syntax: %s\n" % i[2])
         sys.exit(-1)
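
The hunk above is the command-dispatch module; the hunk below is revlog.py. For a sense of what the new heads command prints, here is a hypothetical transcript: the repository, both heads, every hash, user, and date are invented for illustration (the real output prints full 40-character hex nodes via hg.hex, and the second "parents" line appears only for merge changesets; "files:" appears only with -v):

$ hg heads
rev:        12:00a1b2c3d4e5f60718293a4b5c6d7e8f90a1b2c3
parents:    10:1122334455667788990011223344556677889900
manifest:   12:99887766554433221100aabbccddeeff00112233
user: someone@example.com
date: Wed Jun  1 12:00:00 2005
description:
add feature x

rev:        11:ffeeddccbbaa99887766554433221100ffeeddcc
parents:    10:1122334455667788990011223344556677889900
manifest:   11:0102030405060708090a0b0c0d0e0f1011121314
user: someone@example.com
date: Tue May 31 09:30:00 2005
description:
fix bug y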
@@ -1,496 +1,507 @@
 # revlog.py - storage back-end for mercurial
 #
 # This provides efficient delta storage with O(1) retrieve and append
 # and O(changes) merge between branches
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 import zlib, struct, sha, binascii, heapq
 from mercurial import mdiff
 
 def hex(node): return binascii.hexlify(node)
 def bin(node): return binascii.unhexlify(node)
 def short(node): return hex(node[:4])
 
 def compress(text):
     if not text: return text
     if len(text) < 44:
         if text[0] == '\0': return text
         return 'u' + text
     bin = zlib.compress(text)
     if len(bin) > len(text):
         if text[0] == '\0': return text
         return 'u' + text
     return bin
 
 def decompress(bin):
     if not bin: return bin
     t = bin[0]
     if t == '\0': return bin
     if t == 'x': return zlib.decompress(bin)
     if t == 'u': return bin[1:]
     raise "unknown compression type %s" % t
 
 def hash(text, p1, p2):
     l = [p1, p2]
     l.sort()
     return sha.sha(l[0] + l[1] + text).digest()
 
 nullid = "\0" * 20
 indexformat = ">4l20s20s20s"
 
 class lazyparser:
     def __init__(self, data):
         self.data = data
         self.s = struct.calcsize(indexformat)
         self.l = len(data)/self.s
         self.index = [None] * self.l
         self.map = {nullid: -1}
 
     def load(self, pos):
         block = pos / 1000
         i = block * 1000
         end = min(self.l, i + 1000)
         while i < end:
             d = self.data[i * self.s: (i + 1) * self.s]
             e = struct.unpack(indexformat, d)
             self.index[i] = e
             self.map[e[6]] = i
             i += 1
 
 class lazyindex:
     def __init__(self, parser):
         self.p = parser
     def __len__(self):
         return len(self.p.index)
     def load(self, pos):
         self.p.load(pos)
         return self.p.index[pos]
     def __getitem__(self, pos):
         return self.p.index[pos] or self.load(pos)
     def append(self, e):
         self.p.index.append(e)
 
 class lazymap:
     def __init__(self, parser):
         self.p = parser
     def load(self, key):
         n = self.p.data.find(key)
         if n < 0: raise KeyError("node " + hex(key))
         pos = n / self.p.s
         self.p.load(pos)
     def __contains__(self, key):
         try:
             self[key]
             return True
         except KeyError:
             return False
     def __iter__(self):
         for i in xrange(self.p.l):
             try:
                 yield self.p.index[i][6]
             except:
                 self.p.load(i)
                 yield self.p.index[i][6]
     def __getitem__(self, key):
         try:
             return self.p.map[key]
         except KeyError:
             try:
                 self.load(key)
                 return self.p.map[key]
             except KeyError:
                 raise KeyError("node " + hex(key))
     def __setitem__(self, key, val):
         self.p.map[key] = val
 
 class revlog:
     def __init__(self, opener, indexfile, datafile):
         self.indexfile = indexfile
         self.datafile = datafile
         self.opener = opener
         self.cache = None
 
         try:
             i = self.opener(self.indexfile).read()
         except IOError:
             i = ""
 
         if len(i) > 10000:
             # big index, let's parse it on demand
             parser = lazyparser(i)
             self.index = lazyindex(parser)
             self.nodemap = lazymap(parser)
         else:
             s = struct.calcsize(indexformat)
             l = len(i) / s
             self.index = [None] * l
             m = [None] * l
 
             n = 0
             for f in xrange(0, len(i), s):
                 # offset, size, base, linkrev, p1, p2, nodeid
                 e = struct.unpack(indexformat, i[f:f + s])
                 m[n] = (e[6], n)
                 self.index[n] = e
                 n += 1
 
             self.nodemap = dict(m)
             self.nodemap[nullid] = -1
 
 
     def tip(self): return self.node(len(self.index) - 1)
     def count(self): return len(self.index)
     def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
     def rev(self, node): return self.nodemap[node]
     def linkrev(self, node): return self.index[self.nodemap[node]][3]
     def parents(self, node):
         if node == nullid: return (nullid, nullid)
         return self.index[self.nodemap[node]][4:6]
 
     def start(self, rev): return self.index[rev][0]
     def length(self, rev): return self.index[rev][1]
     def end(self, rev): return self.start(rev) + self.length(rev)
     def base(self, rev): return self.index[rev][2]
 
+    def heads(self):
+        p = {}
+        h = []
+        for r in range(self.count() - 1, 0, -1):
+            n = self.node(r)
+            if n not in p:
+                h.append(n)
+            for pn in self.parents(n):
+                p[pn] = 1
+        return h
+
     def lookup(self, id):
         try:
             rev = int(id)
             return self.node(rev)
         except ValueError:
             c = []
             for n in self.nodemap:
                 if id in hex(n):
                     c.append(n)
             if len(c) > 1: raise KeyError("Ambiguous identifier")
             if len(c) < 1: raise KeyError("No match found")
             return c[0]
 
         return None
 
     def diff(self, a, b):
         return mdiff.textdiff(a, b)
 
     def patches(self, t, pl):
         return mdiff.patches(t, pl)
 
     def delta(self, node):
         r = self.rev(node)
         b = self.base(r)
         if r == b:
             return self.diff(self.revision(self.node(r - 1)),
                              self.revision(node))
         else:
             f = self.opener(self.datafile)
             f.seek(self.start(r))
             data = f.read(self.length(r))
             return decompress(data)
 
     def revision(self, node):
         if node == nullid: return ""
         if self.cache and self.cache[0] == node: return self.cache[2]
 
         text = None
         rev = self.rev(node)
         start, length, base, link, p1, p2, node = self.index[rev]
         end = start + length
         if base != rev: start = self.start(base)
 
         if self.cache and self.cache[1] >= base and self.cache[1] < rev:
             base = self.cache[1]
             start = self.start(base + 1)
             text = self.cache[2]
             last = 0
 
         f = self.opener(self.datafile)
         f.seek(start)
         data = f.read(end - start)
 
         if not text:
             last = self.length(base)
             text = decompress(data[:last])
 
         bins = []
         for r in xrange(base + 1, rev + 1):
             s = self.length(r)
             bins.append(decompress(data[last:last + s]))
             last = last + s
 
         text = mdiff.patches(text, bins)
 
         if node != hash(text, p1, p2):
             raise IOError("integrity check failed on %s:%d"
                           % (self.datafile, rev))
 
         self.cache = (node, rev, text)
         return text
 
     def addrevision(self, text, transaction, link, p1=None, p2=None):
         if text is None: text = ""
         if p1 is None: p1 = self.tip()
         if p2 is None: p2 = nullid
 
         node = hash(text, p1, p2)
 
         n = self.count()
         t = n - 1
 
         if n:
             base = self.base(t)
             start = self.start(base)
             end = self.end(t)
             prev = self.revision(self.tip())
             d = self.diff(prev, text)
             data = compress(d)
             dist = end - start + len(data)
 
         # full versions are inserted when the needed deltas
         # become comparable to the uncompressed text
         if not n or dist > len(text) * 2:
             data = compress(text)
             base = n
         else:
             base = self.base(t)
 
         offset = 0
         if t >= 0:
             offset = self.end(t)
 
         e = (offset, len(data), base, link, p1, p2, node)
 
         self.index.append(e)
         self.nodemap[node] = n
         entry = struct.pack(indexformat, *e)
 
         transaction.add(self.datafile, e[0])
         self.opener(self.datafile, "a").write(data)
         transaction.add(self.indexfile, n * len(entry))
         self.opener(self.indexfile, "a").write(entry)
 
         self.cache = (node, n, text)
         return node
 
     def ancestor(self, a, b):
         # calculate the distance of every node from root
         dist = {nullid: 0}
         for i in xrange(self.count()):
             n = self.node(i)
             p1, p2 = self.parents(n)
             dist[n] = max(dist[p1], dist[p2]) + 1
 
         # traverse ancestors in order of decreasing distance from root
         def ancestors(node):
             # we store negative distances because heap returns smallest member
             h = [(-dist[node], node)]
             seen = {}
             earliest = self.count()
             while h:
                 d, n = heapq.heappop(h)
                 r = self.rev(n)
                 if n not in seen:
                     seen[n] = 1
                     yield (-d, n)
                     for p in self.parents(n):
                         heapq.heappush(h, (-dist[p], p))
 
         x = ancestors(a)
         y = ancestors(b)
         lx = x.next()
         ly = y.next()
 
         # increment each ancestor list until it is closer to root than
         # the other, or they match
         while 1:
             if lx == ly:
                 return lx[1]
             elif lx < ly:
                 ly = y.next()
             elif lx > ly:
                 lx = x.next()
 
     def group(self, linkmap):
         # given a list of changeset revs, return a set of deltas and
         # metadata corresponding to nodes. the first delta is
         # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
         # have this parent as it has all history before these
         # changesets. parent is parent[0]
 
         revs = []
         needed = {}
 
         # find file nodes/revs that match changeset revs
         for i in xrange(0, self.count()):
             if self.index[i][3] in linkmap:
                 revs.append(i)
                 needed[i] = 1
 
         # if we don't have any revisions touched by these changesets, bail
         if not revs:
             yield struct.pack(">l", 0)
             return
 
         # add the parent of the first rev
         p = self.parents(self.node(revs[0]))[0]
         revs.insert(0, self.rev(p))
 
         # for each delta that isn't contiguous in the log, we need to
         # reconstruct the base, reconstruct the result, and then
         # calculate the delta. We also need to do this where we've
         # stored a full version and not a delta
         for i in xrange(0, len(revs) - 1):
             a, b = revs[i], revs[i + 1]
             if a + 1 != b or self.base(b) == b:
                 for j in xrange(self.base(a), a + 1):
                     needed[j] = 1
                 for j in xrange(self.base(b), b + 1):
                     needed[j] = 1
 
         # calculate spans to retrieve from datafile
         needed = needed.keys()
         needed.sort()
         spans = []
         oo = -1
         ol = 0
         for n in needed:
             if n < 0: continue
             o = self.start(n)
             l = self.length(n)
             if oo + ol == o: # can we merge with the previous?
                 nl = spans[-1][2]
                 nl.append((n, l))
                 ol += l
                 spans[-1] = (oo, ol, nl)
             else:
                 oo = o
                 ol = l
                 spans.append((oo, ol, [(n, l)]))
 
         # read spans in, divide up chunks
         chunks = {}
         for span in spans:
             # we reopen the file for each span to make http happy for now
             f = self.opener(self.datafile)
             f.seek(span[0])
             data = f.read(span[1])
 
             # divide up the span
             pos = 0
             for r, l in span[2]:
                 chunks[r] = decompress(data[pos: pos + l])
                 pos += l
 
         # helper to reconstruct intermediate versions
         def construct(text, base, rev):
             bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
             return mdiff.patches(text, bins)
 
         # build deltas
         deltas = []
         for d in xrange(0, len(revs) - 1):
             a, b = revs[d], revs[d + 1]
             n = self.node(b)
 
             # do we need to construct a new delta?
             if a + 1 != b or self.base(b) == b:
                 if a >= 0:
                     base = self.base(a)
                     ta = chunks[self.base(a)]
                     ta = construct(ta, base, a)
                 else:
                     ta = ""
 
                 base = self.base(b)
                 if a > base:
                     base = a
                     tb = ta
                 else:
                     tb = chunks[self.base(b)]
                 tb = construct(tb, base, b)
                 d = self.diff(ta, tb)
             else:
                 d = chunks[b]
 
             p = self.parents(n)
             meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
             l = struct.pack(">l", len(meta) + len(d) + 4)
             yield l
             yield meta
             yield d
 
         yield struct.pack(">l", 0)
 
     def addgroup(self, revs, linkmapper, transaction):
         # given a set of deltas, add them to the revision log. the
         # first delta is against its parent, which should be in our
         # log, the rest are against the previous delta.
 
         # track the base of the current delta log
         r = self.count()
         t = r - 1
         node = nullid
 
         base = prev = -1
         start = end = 0
         if r:
             start = self.start(self.base(t))
             end = self.end(t)
             measure = self.length(self.base(t))
             base = self.base(t)
             prev = self.tip()
 
         transaction.add(self.datafile, end)
         transaction.add(self.indexfile, r * struct.calcsize(indexformat))
         dfh = self.opener(self.datafile, "a")
         ifh = self.opener(self.indexfile, "a")
 
         # loop through our set of deltas
         chain = None
         for chunk in revs:
             node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
             link = linkmapper(cs)
             if node in self.nodemap:
                 raise "already have %s" % hex(node[:4])
             delta = chunk[80:]
 
             if not chain:
                 # retrieve the parent revision of the delta chain
                 chain = p1
                 if not chain in self.nodemap:
                     raise "unknown base %s" % short(chain[:4])
 
             # full versions are inserted when the needed deltas become
             # comparable to the uncompressed text or when the previous
             # version is not the one we have a delta against. We use
             # the size of the previous full rev as a proxy for the
             # current size.
 
             if chain == prev:
                 cdelta = compress(delta)
 
             if chain != prev or (end - start + len(cdelta)) > measure * 2:
                 # flush our writes here so we can read it in revision
                 dfh.flush()
                 ifh.flush()
                 text = self.revision(chain)
                 text = self.patches(text, [delta])
                 chk = self.addrevision(text, transaction, link, p1, p2)
                 if chk != node:
                     raise "consistency error adding group"
                 measure = len(text)
             else:
                 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                 self.index.append(e)
                 self.nodemap[node] = r
                 dfh.write(cdelta)
                 ifh.write(struct.pack(indexformat, *e))
 
             t, r, chain, prev = r, r + 1, node, node
             start = self.start(self.base(t))
             end = self.end(t)
 
         dfh.close()
         ifh.close()
         return node
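
The new revlog.heads() above is the heart of the multi-head change: because a revlog is append-only and a revision's parents always precede it, a single reverse scan that records every parent it sees finds exactly the nodes nobody points at, i.e. the heads. A minimal standalone sketch of that idea, with an invented toy DAG (r1 has two children, so r2 and r3 are both heads):

# Toy model of the head-finding scan in revlog.heads(): walk revisions
# tip-first and report any node not yet recorded as someone's parent.
# The nodes and edges below are invented for illustration.
order = ["r0", "r1", "r2", "r3"]          # append (topological) order
parents = {"r0": (), "r1": ("r0",), "r2": ("r1",), "r3": ("r1",)}

def heads():
    seen = {}                             # nodes already seen as a parent
    h = []
    for n in order[::-1]:                 # tip first, root last
        if n not in seen:
            h.append(n)                   # nothing points at n: it's a head
        for p in parents[n]:
            seen[p] = 1
    return h

print(heads())                            # -> ['r3', 'r2']

The command-level heads() in the first hunk then just maps each head node back to its revision number, parents, manifest, and changelog entry for display.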