@@ -1,1861 +1,1870 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 import struct, os, util
 import filelog, manifest, changelog, dirstate, repo
 from node import *
 from i18n import gettext as _
 from demandload import *
 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
 
 class localrepository(object):
     def __init__(self, ui, path=None, create=0):
         if not path:
             p = os.getcwd()
             while not os.path.isdir(os.path.join(p, ".hg")):
                 oldp = p
                 p = os.path.dirname(p)
                 if p == oldp:
                     raise repo.RepoError(_("no repo found"))
             path = p
         self.path = os.path.join(path, ".hg")
 
         if not create and not os.path.isdir(self.path):
             raise repo.RepoError(_("repository %s not found") % path)
 
         self.root = os.path.abspath(path)
         self.ui = ui
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)
         self.manifest = manifest.manifest(self.opener)
         self.changelog = changelog.changelog(self.opener)
         self.tagscache = None
         self.nodetagscache = None
         self.encodepats = None
         self.decodepats = None
 
         if create:
             os.mkdir(self.path)
             os.mkdir(self.join("data"))
 
         self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
         try:
             self.ui.readconfig(self.join("hgrc"))
         except IOError:
             pass
 
     def hook(self, name, throw=False, **args):
         def runhook(name, cmd):
             self.ui.note(_("running hook %s: %s\n") % (name, cmd))
             old = {}
             for k, v in args.items():
                 k = k.upper()
                 old['HG_' + k] = os.environ.get(k, None)
                 old[k] = os.environ.get(k, None)
                 os.environ['HG_' + k] = str(v)
                 os.environ[k] = str(v)
 
             try:
                 # Hooks run in the repository root
                 olddir = os.getcwd()
                 os.chdir(self.root)
                 r = os.system(cmd)
             finally:
                 for k, v in old.items():
                     if v is not None:
                         os.environ[k] = v
                     else:
                         del os.environ[k]
 
                 os.chdir(olddir)
 
             if r:
                 desc, r = util.explain_exit(r)
                 if throw:
                     raise util.Abort(_('%s hook %s') % (name, desc))
                 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
                 return False
             return True
 
         r = True
         for hname, cmd in self.ui.configitems("hooks"):
             s = hname.split(".")
             if s[0] == name and cmd:
                 r = runhook(hname, cmd) and r
         return r
 
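The hook runner above exports every keyword argument into the environment twice, once bare and once with an `HG_` prefix, runs the command through `os.system()` from the repository root, and restores the previous environment even when the hook fails. A minimal standalone sketch of that save/override/restore pattern (the function and variable names here are illustrative, not part of the module):

```python
import os

def with_env(overrides, fn):
    # Save prior values, apply overrides, run fn, restore afterwards --
    # the same pattern runhook() wraps around os.system(cmd).
    old = {}
    for k, v in overrides.items():
        old[k] = os.environ.get(k)
        os.environ[k] = str(v)
    try:
        return fn()
    finally:
        for k, v in old.items():
            if v is not None:
                os.environ[k] = v
            else:
                os.environ.pop(k, None)

with_env({'HG_NODE': 'abc123'}, lambda: os.system('echo $HG_NODE'))
```
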
     def tags(self):
         '''return a mapping of tag to node'''
         if not self.tagscache:
             self.tagscache = {}
             def addtag(self, k, n):
                 try:
                     bin_n = bin(n)
                 except TypeError:
                     bin_n = ''
                 self.tagscache[k.strip()] = bin_n
 
             try:
                 # read each head of the tags file, ending with the tip
                 # and add each tag found to the map, with "newer" ones
                 # taking precedence
                 fl = self.file(".hgtags")
                 h = fl.heads()
                 h.reverse()
                 for r in h:
                     for l in fl.read(r).splitlines():
                         if l:
                             n, k = l.split(" ", 1)
                             addtag(self, k, n)
             except KeyError:
                 pass
 
             try:
                 f = self.opener("localtags")
                 for l in f:
                     n, k = l.split(" ", 1)
                     addtag(self, k, n)
             except IOError:
                 pass
 
             self.tagscache['tip'] = self.changelog.tip()
 
         return self.tagscache
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().items():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r, t, n))
         l.sort()
         return [(t, n) for r, t, n in l]
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().items():
                 self.nodetagscache.setdefault(n, []).append(t)
         return self.nodetagscache.get(node, [])
 
     def lookup(self, key):
         try:
             return self.tags()[key]
         except KeyError:
             try:
                 return self.changelog.lookup(key)
             except:
                 raise repo.RepoError(_("unknown revision '%s'") % key)
 
     def dev(self):
         return os.stat(self.path).st_dev
 
     def local(self):
         return True
 
     def join(self, f):
         return os.path.join(self.path, f)
 
     def wjoin(self, f):
         return os.path.join(self.root, f)
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.opener, f)
 
     def getcwd(self):
         return self.dirstate.getcwd()
 
     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)
 
     def wread(self, filename):
         if self.encodepats == None:
             l = []
             for pat, cmd in self.ui.configitems("encode"):
                 mf = util.matcher("", "/", [pat], [], [])[1]
                 l.append((mf, cmd))
             self.encodepats = l
 
         data = self.wopener(filename, 'r').read()
 
         for mf, cmd in self.encodepats:
             if mf(filename):
                 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                 data = util.filter(data, cmd)
                 break
 
         return data
 
     def wwrite(self, filename, data, fd=None):
         if self.decodepats == None:
             l = []
             for pat, cmd in self.ui.configitems("decode"):
                 mf = util.matcher("", "/", [pat], [], [])[1]
                 l.append((mf, cmd))
             self.decodepats = l
 
         for mf, cmd in self.decodepats:
             if mf(filename):
                 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                 data = util.filter(data, cmd)
                 break
 
         if fd:
             return fd.write(data)
         return self.wopener(filename, 'w').write(data)
 
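`wread()` and `wwrite()` are the only paths between the working directory and the store, which is what makes the `[encode]`/`[decode]` hgrc filters reliable: the first pattern that matches the file name pipes the data through its shell command. A rough standalone model of that dispatch, where `fnmatch` and `subprocess` stand in for `util.matcher` and `util.filter` (which differ in detail):

```python
import fnmatch, subprocess

def filter_data(filename, data, patterns):
    # patterns: [(glob, shell_command)], like an [encode] or [decode]
    # hgrc section; the first match wins, mirroring the 'break' above.
    for pat, cmd in patterns:
        if fnmatch.fnmatch(filename, pat):
            return subprocess.run(cmd, input=data, shell=True,
                                  stdout=subprocess.PIPE).stdout
    return data

print(filter_data('notes.txt', b'hi\n', [('*.txt', 'tr a-z A-Z')]))  # b'HI\n'
```
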
     def transaction(self):
         # save dirstate for undo
         try:
             ds = self.opener("dirstate").read()
         except IOError:
             ds = ""
         self.opener("journal.dirstate", "w").write(ds)
 
         def after():
             util.rename(self.join("journal"), self.join("undo"))
             util.rename(self.join("journal.dirstate"),
                         self.join("undo.dirstate"))
 
         return transaction.transaction(self.ui.warn, self.opener,
                                        self.join("journal"), after)
 
     def recover(self):
         l = self.lock()
         if os.path.exists(self.join("journal")):
             self.ui.status(_("rolling back interrupted transaction\n"))
             transaction.rollback(self.opener, self.join("journal"))
-            self.manifest = manifest.manifest(self.opener)
-            self.changelog = changelog.changelog(self.opener)
+            self.reload()
             return True
         else:
             self.ui.warn(_("no interrupted transaction available\n"))
             return False
 
     def undo(self, wlock=None):
         if not wlock:
             wlock = self.wlock()
         l = self.lock()
         if os.path.exists(self.join("undo")):
             self.ui.status(_("rolling back last transaction\n"))
             transaction.rollback(self.opener, self.join("undo"))
             util.rename(self.join("undo.dirstate"), self.join("dirstate"))
-            self.dirstate.read()
+            self.reload()
+            self.wreload()
         else:
             self.ui.warn(_("no undo information available\n"))
 
+    def wreload(self):
+        self.dirstate.read()
+
+    def reload(self):
+        self.changelog.load()
+        self.manifest.load()
+        self.tagscache = None
+        self.nodetagscache = None
+
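This is the heart of the change: `recover()` used to rebuild the manifest and changelog objects from scratch, and `undo()` only re-read the dirstate. Both now funnel through `reload()`/`wreload()`, which re-read the revlogs in place and drop the tag caches. Dropping `tagscache` matters because a rollback can remove the very nodes a cached tag map points at. A toy model of the invalidate-and-recompute-lazily pattern (class and method names here are illustrative):

```python
class TagCache(object):
    # Expensive state is computed lazily and dropped wholesale when
    # the on-disk data may have changed -- the reload() pattern above.
    def __init__(self, read_tags):
        self._read_tags = read_tags
        self._tags = None

    def tags(self):
        if self._tags is None:
            self._tags = self._read_tags()   # recomputed on demand
        return self._tags

    def reload(self):
        self._tags = None                    # next tags() call re-reads

cache = TagCache(lambda: {'tip': 'abc123'})
print(cache.tags())
cache.reload()                               # e.g. after a rollback
```
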
     def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
         try:
             l = lock.lock(self.join(lockname), 0, releasefn)
         except lock.LockHeld, inst:
             if not wait:
                 raise inst
             self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
             l = lock.lock(self.join(lockname), wait, releasefn)
         if acquirefn:
             acquirefn()
         return l
 
     def lock(self, wait=1):
-        return self.do_lock("lock", wait)
+        return self.do_lock("lock", wait, acquirefn=self.reload)
 
     def wlock(self, wait=1):
         return self.do_lock("wlock", wait,
                             self.dirstate.write,
-                            self.dirstate.read)
+                            self.wreload)
 
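`do_lock()` first tries a non-blocking acquire and only then waits, and `acquirefn` runs after the lock is held, so a process that waited on another writer re-reads whatever that writer left on disk. The callers in this file also follow one fixed order, `wlock` (working directory) before `lock` (store), which is what keeps two concurrent commands from deadlocking. A runnable toy of that ordering convention, with `threading.Lock` standing in for the file-based `lock.lock`:

```python
import threading

wlock = threading.Lock()   # stands in for the working-dir lock
slock = threading.Lock()   # stands in for the store lock

def update_repo(mutate):
    # Every writer takes the locks in the same order, so no two
    # threads can each hold one lock while waiting on the other.
    with wlock:
        with slock:
            mutate()

update_repo(lambda: None)
```
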
     def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
         "determine whether a new filenode is needed"
         fp1 = manifest1.get(filename, nullid)
         fp2 = manifest2.get(filename, nullid)
 
         if fp2 != nullid:
             # is one parent an ancestor of the other?
             fpa = filelog.ancestor(fp1, fp2)
             if fpa == fp1:
                 fp1, fp2 = fp2, nullid
             elif fpa == fp2:
                 fp2 = nullid
 
         # is the file unmodified from the parent? report existing entry
         if fp2 == nullid and text == filelog.read(fp1):
             return (fp1, None, None)
 
         return (None, fp1, fp2)
 
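The parent normalization above collapses a merge to a linear update whenever one candidate parent is an ancestor of the other; only a genuine criss-cross keeps both parents. A self-contained restatement of just that step, with toy node names:

```python
NULL = "0" * 40   # stands in for nullid

def resolve_parents(fp1, fp2, ancestor):
    # If one parent is an ancestor of the other, only the newer side
    # needs recording -- the same normalization checkfilemerge does.
    if fp2 != NULL:
        fpa = ancestor(fp1, fp2)
        if fpa == fp1:
            fp1, fp2 = fp2, NULL
        elif fpa == fp2:
            fp2 = NULL
    return fp1, fp2

# ancestor('a', 'b') == 'a' means 'a' precedes 'b', so the "merge"
# degenerates to recording against 'b' alone:
assert resolve_parents("a", "b", lambda x, y: "a") == ("b", NULL)
```
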
     def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
         orig_parent = self.dirstate.parents()[0] or nullid
         p1 = p1 or self.dirstate.parents()[0] or nullid
         p2 = p2 or self.dirstate.parents()[1] or nullid
         c1 = self.changelog.read(p1)
         c2 = self.changelog.read(p2)
         m1 = self.manifest.read(c1[0])
         mf1 = self.manifest.readflags(c1[0])
         m2 = self.manifest.read(c2[0])
         changed = []
 
         if orig_parent == p1:
             update_dirstate = 1
         else:
             update_dirstate = 0
 
         if not wlock:
             wlock = self.wlock()
         l = self.lock()
         tr = self.transaction()
         mm = m1.copy()
         mfm = mf1.copy()
         linkrev = self.changelog.count()
         for f in files:
             try:
                 t = self.wread(f)
                 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                 r = self.file(f)
                 mfm[f] = tm
 
                 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                 if entry:
                     mm[f] = entry
                     continue
 
                 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                 changed.append(f)
                 if update_dirstate:
                     self.dirstate.update([f], "n")
             except IOError:
                 try:
                     del mm[f]
                     del mfm[f]
                     if update_dirstate:
                         self.dirstate.forget([f])
                 except:
                     # deleted from p2?
                     pass
 
         mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
         user = user or self.ui.username()
         n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
         tr.close()
         if update_dirstate:
             self.dirstate.setparents(n, nullid)
 
     def commit(self, files=None, text="", user=None, date=None,
                match=util.always, force=False, wlock=None):
         commit = []
         remove = []
         changed = []
 
         if files:
             for f in files:
                 s = self.dirstate.state(f)
                 if s in 'nmai':
                     commit.append(f)
                 elif s == 'r':
                     remove.append(f)
                 else:
                     self.ui.warn(_("%s not tracked!\n") % f)
         else:
             modified, added, removed, deleted, unknown = self.changes(match=match)
             commit = modified + added
             remove = removed
 
         p1, p2 = self.dirstate.parents()
         c1 = self.changelog.read(p1)
         c2 = self.changelog.read(p2)
         m1 = self.manifest.read(c1[0])
         mf1 = self.manifest.readflags(c1[0])
         m2 = self.manifest.read(c2[0])
 
         if not commit and not remove and not force and p2 == nullid:
             self.ui.status(_("nothing changed\n"))
             return None
 
         xp1 = hex(p1)
         if p2 == nullid: xp2 = ''
         else: xp2 = hex(p2)
 
         self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
 
         if not wlock:
             wlock = self.wlock()
         l = self.lock()
         tr = self.transaction()
 
         # check in files
         new = {}
         linkrev = self.changelog.count()
         commit.sort()
         for f in commit:
             self.ui.note(f + "\n")
             try:
                 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                 t = self.wread(f)
             except IOError:
                 self.ui.warn(_("trouble committing %s!\n") % f)
                 raise
 
             r = self.file(f)
 
             meta = {}
             cp = self.dirstate.copied(f)
             if cp:
                 meta["copy"] = cp
                 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                 fp1, fp2 = nullid, nullid
             else:
                 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                 if entry:
                     new[f] = entry
                     continue
 
             new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
             # remember what we've added so that we can later calculate
             # the files to pull from a set of changesets
             changed.append(f)
 
         # update manifest
         m1 = m1.copy()
         m1.update(new)
         for f in remove:
             if f in m1:
                 del m1[f]
         mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                                (new, remove))
 
         # add changeset
         new = new.keys()
         new.sort()
 
         if not text:
             edittext = [""]
             if p2 != nullid:
                 edittext.append("HG: branch merge")
             edittext.extend(["HG: changed %s" % f for f in changed])
             edittext.extend(["HG: removed %s" % f for f in remove])
             if not changed and not remove:
                 edittext.append("HG: no files changed")
             edittext.append("")
             # run editor in the repository root
             olddir = os.getcwd()
             os.chdir(self.root)
             edittext = self.ui.edit("\n".join(edittext))
             os.chdir(olddir)
             if not edittext.rstrip():
                 return None
             text = edittext
 
         user = user or self.ui.username()
         n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
         self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                   parent2=xp2)
         tr.close()
 
         self.dirstate.setparents(n)
         self.dirstate.update(new, "n")
         self.dirstate.forget(remove)
 
         self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
         return n
 
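When no message is passed, `commit()` assembles an editor template out of `HG:`-prefixed lines and aborts if the edited result is empty. A standalone rebuild of just that template step (assuming, as the code above implies, that `ui.edit()` later strips the `HG:` lines):

```python
def committext(changed, removed, merge=False):
    # Mirrors the edittext assembly in commit() above.
    lines = [""]
    if merge:
        lines.append("HG: branch merge")
    lines.extend(["HG: changed %s" % f for f in changed])
    lines.extend(["HG: removed %s" % f for f in removed])
    if not changed and not removed:
        lines.append("HG: no files changed")
    lines.append("")
    return "\n".join(lines)

print(committext(["a.txt"], [], merge=True))
```
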
     def walk(self, node=None, files=[], match=util.always):
         if node:
             fdict = dict.fromkeys(files)
             for fn in self.manifest.read(self.changelog.read(node)[0]):
                 fdict.pop(fn, None)
                 if match(fn):
                     yield 'm', fn
             for fn in fdict:
                 self.ui.warn(_('%s: No such file in rev %s\n') % (
                     util.pathto(self.getcwd(), fn), short(node)))
         else:
             for src, fn in self.dirstate.walk(files, match):
                 yield src, fn
 
     def changes(self, node1=None, node2=None, files=[], match=util.always,
                 wlock=None):
         """return changes between two nodes or node and working directory
 
         If node1 is None, use the first dirstate parent instead.
         If node2 is None, compare node1 with working directory.
         """
 
         def fcmp(fn, mf):
             t1 = self.wread(fn)
             t2 = self.file(fn).read(mf.get(fn, nullid))
             return cmp(t1, t2)
 
         def mfmatches(node):
             change = self.changelog.read(node)
             mf = dict(self.manifest.read(change[0]))
             for fn in mf.keys():
                 if not match(fn):
                     del mf[fn]
             return mf
 
         # are we comparing the working directory?
         if not node2:
             if not wlock:
                 try:
                     wlock = self.wlock(wait=0)
                 except lock.LockException:
                     wlock = None
             lookup, modified, added, removed, deleted, unknown = (
                 self.dirstate.changes(files, match))
 
             # are we comparing working dir against its parent?
             if not node1:
                 if lookup:
                     # do a full compare of any files that might have changed
                     mf2 = mfmatches(self.dirstate.parents()[0])
                     for f in lookup:
                         if fcmp(f, mf2):
                             modified.append(f)
                         elif wlock is not None:
                             self.dirstate.update([f], "n")
             else:
                 # we are comparing working dir against non-parent
                 # generate a pseudo-manifest for the working dir
                 mf2 = mfmatches(self.dirstate.parents()[0])
                 for f in lookup + modified + added:
                     mf2[f] = ""
                 for f in removed:
                     if f in mf2:
                         del mf2[f]
         else:
             # we are comparing two revisions
             deleted, unknown = [], []
             mf2 = mfmatches(node2)
 
         if node1:
             # flush lists from dirstate before comparing manifests
             modified, added = [], []
 
             mf1 = mfmatches(node1)
 
             for fn in mf2:
                 if mf1.has_key(fn):
                     if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                         modified.append(fn)
                     del mf1[fn]
                 else:
                     added.append(fn)
 
             removed = mf1.keys()
 
         # sort and return results:
         for l in modified, added, removed, deleted, unknown:
             l.sort()
         return (modified, added, removed, deleted, unknown)
 
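For the two-revision case, `changes()` reduces to a plain manifest diff: names only in the newer manifest are added, names only in the older are removed, and names in both with different filenodes are modified. A minimal standalone version of that core:

```python
def manifest_diff(mf1, mf2):
    # mf1, mf2: {filename: filenode} -- the same walk the two-revision
    # branch of changes() performs over mfmatches(node1)/mfmatches(node2).
    mf1 = dict(mf1)
    modified, added = [], []
    for fn in mf2:
        if fn in mf1:
            if mf1[fn] != mf2[fn]:
                modified.append(fn)
            del mf1[fn]
        else:
            added.append(fn)
    removed = list(mf1)
    return sorted(modified), sorted(added), sorted(removed)

assert manifest_diff({'a': 1, 'b': 2}, {'b': 3, 'c': 4}) == (['b'], ['c'], ['a'])
```
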
     def add(self, list, wlock=None):
         if not wlock:
             wlock = self.wlock()
         for f in list:
             p = self.wjoin(f)
             if not os.path.exists(p):
                 self.ui.warn(_("%s does not exist!\n") % f)
             elif not os.path.isfile(p):
                 self.ui.warn(_("%s not added: only files supported currently\n")
                              % f)
             elif self.dirstate.state(f) in 'an':
                 self.ui.warn(_("%s already tracked!\n") % f)
             else:
                 self.dirstate.update([f], "a")
 
     def forget(self, list, wlock=None):
         if not wlock:
             wlock = self.wlock()
         for f in list:
             if self.dirstate.state(f) not in 'ai':
                 self.ui.warn(_("%s not added!\n") % f)
             else:
                 self.dirstate.forget([f])
 
     def remove(self, list, unlink=False, wlock=None):
         if unlink:
             for f in list:
                 try:
                     util.unlink(self.wjoin(f))
                 except OSError, inst:
                     if inst.errno != errno.ENOENT:
                         raise
         if not wlock:
             wlock = self.wlock()
         for f in list:
             p = self.wjoin(f)
             if os.path.exists(p):
                 self.ui.warn(_("%s still exists!\n") % f)
             elif self.dirstate.state(f) == 'a':
                 self.dirstate.forget([f])
             elif f not in self.dirstate:
                 self.ui.warn(_("%s not tracked!\n") % f)
             else:
                 self.dirstate.update([f], "r")
 
     def undelete(self, list, wlock=None):
         p = self.dirstate.parents()[0]
         mn = self.changelog.read(p)[0]
         mf = self.manifest.readflags(mn)
         m = self.manifest.read(mn)
         if not wlock:
             wlock = self.wlock()
         for f in list:
             if self.dirstate.state(f) not in "r":
                 self.ui.warn("%s not removed!\n" % f)
             else:
                 t = self.file(f).read(m[f])
                 self.wwrite(f, t)
                 util.set_exec(self.wjoin(f), mf[f])
                 self.dirstate.update([f], "n")
 
     def copy(self, source, dest, wlock=None):
         p = self.wjoin(dest)
         if not os.path.exists(p):
             self.ui.warn(_("%s does not exist!\n") % dest)
         elif not os.path.isfile(p):
             self.ui.warn(_("copy failed: %s is not a file\n") % dest)
         else:
             if not wlock:
                 wlock = self.wlock()
             if self.dirstate.state(dest) == '?':
                 self.dirstate.update([dest], "a")
             self.dirstate.copy(source, dest)
 
     def heads(self, start=None):
         heads = self.changelog.heads(start)
         # sort the output in rev descending order
         heads = [(-self.changelog.rev(h), h) for h in heads]
         heads.sort()
         return [n for (r, n) in heads]
 
     # branchlookup returns a dict giving a list of branches for
     # each head. A branch is defined as the tag of a node or
     # the branch of the node's parents. If a node has multiple
     # branch tags, tags are eliminated if they are visible from other
     # branch tags.
     #
     # So, for this graph:  a->b->c->d->e
     #                       \         /
     #                        aa -----/
     # a has tag 2.6.12
     # d has tag 2.6.13
     # e would have branch tags for 2.6.12 and 2.6.13. Because the node
     # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
     # from the list.
     #
     # It is possible that more than one head will have the same branch tag.
     # callers need to check the result for multiple heads under the same
     # branch tag if that is a problem for them (ie checkout of a specific
     # branch).
     #
     # passing in a specific branch will limit the depth of the search
     # through the parents. It won't limit the branches returned in the
     # result though.
     def branchlookup(self, heads=None, branch=None):
         if not heads:
             heads = self.heads()
         headt = [ h for h in heads ]
         chlog = self.changelog
         branches = {}
         merges = []
         seenmerge = {}
 
         # traverse the tree once for each head, recording in the branches
         # dict which tags are visible from this head. The branches
         # dict also records which tags are visible from each tag
         # while we traverse.
         while headt or merges:
             if merges:
                 n, found = merges.pop()
                 visit = [n]
             else:
                 h = headt.pop()
                 visit = [h]
                 found = [h]
                 seen = {}
             while visit:
                 n = visit.pop()
                 if n in seen:
                     continue
                 pp = chlog.parents(n)
                 tags = self.nodetags(n)
                 if tags:
                     for x in tags:
                         if x == 'tip':
                             continue
                         for f in found:
                             branches.setdefault(f, {})[n] = 1
                         branches.setdefault(n, {})[n] = 1
                         break
                     if n not in found:
                         found.append(n)
                     if branch in tags:
                         continue
                 seen[n] = 1
                 if pp[1] != nullid and n not in seenmerge:
                     merges.append((pp[1], [x for x in found]))
                     seenmerge[n] = 1
                 if pp[0] != nullid:
                     visit.append(pp[0])
         # traverse the branches dict, eliminating branch tags from each
         # head that are visible from another branch tag for that head.
         out = {}
         viscache = {}
         for h in heads:
             def visible(node):
                 if node in viscache:
                     return viscache[node]
                 ret = {}
                 visit = [node]
                 while visit:
                     x = visit.pop()
                     if x in viscache:
                         ret.update(viscache[x])
                     elif x not in ret:
                         ret[x] = 1
                         if x in branches:
                             visit[len(visit):] = branches[x].keys()
                 viscache[node] = ret
                 return ret
             if h not in branches:
                 continue
             # O(n^2), but somewhat limited. This only searches the
             # tags visible from a specific head, not all the tags in the
             # whole repo.
             for b in branches[h]:
                 vis = False
                 for bb in branches[h].keys():
                     if b != bb:
                         if b in visible(bb):
                             vis = True
                             break
                 if not vis:
                     l = out.setdefault(h, [])
                     l[len(l):] = self.nodetags(b)
         return out
 
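The comment block above gives the elimination rule; here it is run end-to-end on that very graph. 'a' carries 2.6.12 and 'd' carries 2.6.13; both are visible from head 'e', but 'a' is an ancestor of 'd', so only 2.6.13 survives as e's branch tag:

```python
parents = {'e': ['d', 'aa'], 'd': ['c'], 'c': ['b'], 'b': ['a'],
           'aa': ['a'], 'a': []}
tags = {'a': '2.6.12', 'd': '2.6.13'}

def ancestors(n):
    seen, stack = set(), [n]
    while stack:
        x = stack.pop()
        if x not in seen:
            seen.add(x)
            stack.extend(parents[x])
    return seen

tagged = [n for n in ancestors('e') if n in tags]
surviving = [tags[n] for n in tagged
             if not any(m != n and n in ancestors(m) for m in tagged)]
print(surviving)   # ['2.6.13']
```
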
     def branches(self, nodes):
         if not nodes:
             nodes = [self.changelog.tip()]
         b = []
         for n in nodes:
             t = n
             while n:
                 p = self.changelog.parents(n)
                 if p[1] != nullid or p[0] == nullid:
                     b.append((t, n, p[0], p[1]))
                     break
                 n = p[0]
         return b
 
     def between(self, pairs):
         r = []
 
         for top, bottom in pairs:
             n, l, i = top, [], 0
             f = 1
 
             while n != bottom:
                 p = self.changelog.parents(n)[0]
                 if i == f:
                     l.append(n)
                     f = f * 2
                 n = p
                 i += 1
 
             r.append(l)
 
         return r
 
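`between()` walks the first-parent chain from top towards bottom and keeps the nodes at distances 1, 2, 4, 8, ... from the top; `findincoming()` below uses those exponentially spaced samples to narrow down, binary-search style, where known history ends and unknown history begins. The same loop run over a toy linear history:

```python
def between(first_parent, top, bottom):
    # first_parent: node -> its first parent; same loop as above.
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        p = first_parent[n]
        if i == f:
            l.append(n)
            f = f * 2
        n = p
        i += 1
    return l

chain = {i: i - 1 for i in range(1, 17)}     # linear history 0 -> 16
print(between(chain, 16, 0))                 # [15, 14, 12, 8]
```
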
781 | def findincoming(self, remote, base=None, heads=None): |
|
790 | def findincoming(self, remote, base=None, heads=None): | |
782 | m = self.changelog.nodemap |
|
791 | m = self.changelog.nodemap | |
783 | search = [] |
|
792 | search = [] | |
784 | fetch = {} |
|
793 | fetch = {} | |
785 | seen = {} |
|
794 | seen = {} | |
786 | seenbranch = {} |
|
795 | seenbranch = {} | |
787 | if base == None: |
|
796 | if base == None: | |
788 | base = {} |
|
797 | base = {} | |
789 |
|
798 | |||
790 | # assume we're closer to the tip than the root |
|
799 | # assume we're closer to the tip than the root | |
791 | # and start by examining the heads |
|
800 | # and start by examining the heads | |
792 | self.ui.status(_("searching for changes\n")) |
|
801 | self.ui.status(_("searching for changes\n")) | |
793 |
|
802 | |||
794 | if not heads: |
|
803 | if not heads: | |
795 | heads = remote.heads() |
|
804 | heads = remote.heads() | |
796 |
|
805 | |||
797 | unknown = [] |
|
806 | unknown = [] | |
798 | for h in heads: |
|
807 | for h in heads: | |
799 | if h not in m: |
|
808 | if h not in m: | |
800 | unknown.append(h) |
|
809 | unknown.append(h) | |
801 | else: |
|
810 | else: | |
802 | base[h] = 1 |
|
811 | base[h] = 1 | |
803 |
|
812 | |||
804 | if not unknown: |
|
813 | if not unknown: | |
805 | return None |
|
814 | return None | |
806 |
|
815 | |||
807 | rep = {} |
|
816 | rep = {} | |
808 | reqcnt = 0 |
|
817 | reqcnt = 0 | |
809 |
|
818 | |||
810 | # search through remote branches |
|
819 | # search through remote branches | |
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

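        # The narrowing loop above works because remote.between() samples
        # the segment between head and root at exponentially growing
        # distances: f doubles every step (1, 2, 4, ...).  Hitting a known
        # node at distance f either pins down the earliest unknown
        # changeset (f <= 2) or re-schedules the halved interval (p, i),
        # so a segment of n changesets is resolved in about log2(n)
        # between() round trips rather than n.
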
        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

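    # findoutgoing below is the mirror image of findincoming above.  A
    # sketch of how the two halves combine (mirroring what push() does):
    #
    #   base = {}
    #   self.findincoming(remote, base, remote.heads())  # learn common base
    #   roots = self.findoutgoing(remote, base)          # roots we must send
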
    def findoutgoing(self, remote, base=None, heads=None):
        if base == None:
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset

    def pull(self, remote, heads=None):
        l = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status(_("requesting all changes\n"))
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 1

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg)

    def push(self, remote, force=False, revs=None):
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
            return 1

        update = self.findoutgoing(remote, base)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            if len(bases) < len(heads):
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return remote.addchangegroup(cg)

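    # The len(bases) < len(heads) test in push() above is a rough
    # heuristic: nodesbetween() returns the minimal bases and heads of the
    # outgoing set, so having more local heads than outgoing bases
    # suggests the push would leave extra heads -- new branches -- on the
    # remote side.
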
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

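        # The stream generated below is a sequence of length-prefixed
        # chunks: each chunk starts with a 4-byte big-endian length that
        # counts itself plus the payload, so struct.pack(">l", 0) (or any
        # length <= 4) marks the end of a group.  gengroup() below writes
        # this framing; getchunk() in addchangegroup() reads it back.
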
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

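        # Typical use, as seen below: haslst.sort(cmp_by_rev_func(revlog))
        # sorts a list of nodes into revlog (and hence topological) order;
        # Python 2's list.sort() accepts such a cmp-style comparator
        # directly.
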
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

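        # Each manifest line (and so each delta line above) has the form
        # "<filename>\0<40 hex digits of filenode>", possibly followed by a
        # flag character such as 'x' -- which is why the fast path slices
        # fnode[:40] before bin()-ing it.
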
        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)

            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

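    # Both generators above and below emit the same stream layout:
    #
    #   changelog group
    #   manifest group
    #   for each changed file:
    #       filename chunk
    #       filenode group
    #   empty chunk (end marker)
    #
    # addchangegroup() consumes the stream in exactly this order.
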
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield struct.pack(">l", 0)
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source):

        def getchunk():
            d = source.read(4)
            if not d:
                return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4:
                return ""
            d = source.read(l - 4)
            if len(d) < l - 4:
                raise repo.RepoError(_("premature EOF reading chunk"
                                       " (got %d bytes, expected %d)")
                                     % (len(d), l - 4))
            return d

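        # getchunk() inverts the framing used by the changegroup
        # generators: a chunk written as
        #   struct.pack(">l", len(data) + 4) + data
        # reads back as data, and a length of 0 (or anything <= 4, an
        # empty payload) reads as "" and terminates the current group.
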
        def getgroup():
            while 1:
                c = getchunk()
                if not c:
                    break
                yield c

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        def revmap(x):
            return self.changelog.rev(x)

        if not source:
            return

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = getchunk()
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))

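    # Hook ordering in addchangegroup() above: 'prechangegroup' fires
    # before any data is read, 'pretxnchangegroup' fires inside the open
    # transaction (raising there rolls the whole changegroup back), and
    # 'changegroup' plus one 'incoming' per new changeset fire only after
    # tr.close() has committed the new data.
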
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None):
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

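        # If the ancestor is p1 itself, p2 is a descendant and this is a
        # plain forward update; if the ancestor is p2, we are going
        # backwards in history.  Only when it is neither do we have a real
        # branch merge on our hands.
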
        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

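        # The exec-bit merge above, with a = ancestor bit, b = working dir
        # bit, c = remote bit: mode = ((a^b) | (a^c)) ^ a yields a when
        # neither side changed, the changed side's bit when exactly one
        # side changed, and the (common) new bit when both changed --
        # i.e. "if we changed or they changed, change in merge".
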
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

1613 | # get the files we don't need to change |
|
1622 | # get the files we don't need to change | |
1614 | files = get.keys() |
|
1623 | files = get.keys() | |
1615 | files.sort() |
|
1624 | files.sort() | |
1616 | for f in files: |
|
1625 | for f in files: | |
1617 | if f[0] == "/": |
|
1626 | if f[0] == "/": | |
1618 | continue |
|
1627 | continue | |
1619 | self.ui.note(_("getting %s\n") % f) |
|
1628 | self.ui.note(_("getting %s\n") % f) | |
1620 | t = self.file(f).read(get[f]) |
|
1629 | t = self.file(f).read(get[f]) | |
1621 | self.wwrite(f, t) |
|
1630 | self.wwrite(f, t) | |
1622 | util.set_exec(self.wjoin(f), mf2[f]) |
|
1631 | util.set_exec(self.wjoin(f), mf2[f]) | |
1623 | if moddirstate: |
|
1632 | if moddirstate: | |
1624 | if branch_merge: |
|
1633 | if branch_merge: | |
1625 | self.dirstate.update([f], 'n', st_mtime=-1) |
|
1634 | self.dirstate.update([f], 'n', st_mtime=-1) | |
1626 | else: |
|
1635 | else: | |
1627 | self.dirstate.update([f], 'n') |
|
1636 | self.dirstate.update([f], 'n') | |
1628 |
|
1637 | |||
1629 | # merge the tricky bits |
|
1638 | # merge the tricky bits | |
1630 | files = merge.keys() |
|
1639 | files = merge.keys() | |
1631 | files.sort() |
|
1640 | files.sort() | |
1632 | for f in files: |
|
1641 | for f in files: | |
1633 | self.ui.status(_("merging %s\n") % f) |
|
1642 | self.ui.status(_("merging %s\n") % f) | |
1634 | my, other, flag = merge[f] |
|
1643 | my, other, flag = merge[f] | |
1635 | ret = self.merge3(f, my, other) |
|
1644 | ret = self.merge3(f, my, other) | |
1636 | if ret: |
|
1645 | if ret: | |
1637 | err = True |
|
1646 | err = True | |
1638 | util.set_exec(self.wjoin(f), flag) |
|
1647 | util.set_exec(self.wjoin(f), flag) | |
1639 | if moddirstate: |
|
1648 | if moddirstate: | |
1640 | if branch_merge: |
|
1649 | if branch_merge: | |
1641 | # We've done a branch merge, mark this file as merged |
|
1650 | # We've done a branch merge, mark this file as merged | |
1642 | # so that we properly record the merger later |
|
1651 | # so that we properly record the merger later | |
1643 | self.dirstate.update([f], 'm') |
|
1652 | self.dirstate.update([f], 'm') | |
1644 | else: |
|
1653 | else: | |
1645 | # We've update-merged a locally modified file, so |
|
1654 | # We've update-merged a locally modified file, so | |
1646 | # we set the dirstate to emulate a normal checkout |
|
1655 | # we set the dirstate to emulate a normal checkout | |
1647 | # of that file some time in the past. Thus our |
|
1656 | # of that file some time in the past. Thus our | |
1648 | # merge will appear as a normal local file |
|
1657 | # merge will appear as a normal local file | |
1649 | # modification. |
|
1658 | # modification. | |
1650 | f_len = len(self.file(f).read(other)) |
|
1659 | f_len = len(self.file(f).read(other)) | |
1651 | self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1) |
|
1660 | self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1) | |
1652 |
|
1661 | |||
1653 | remove.sort() |
|
1662 | remove.sort() | |
1654 | for f in remove: |
|
1663 | for f in remove: | |
1655 | self.ui.note(_("removing %s\n") % f) |
|
1664 | self.ui.note(_("removing %s\n") % f) | |
1656 | try: |
|
1665 | try: | |
1657 | util.unlink(self.wjoin(f)) |
|
1666 | util.unlink(self.wjoin(f)) | |
1658 | except OSError, inst: |
|
1667 | except OSError, inst: | |
1659 | if inst.errno != errno.ENOENT: |
|
1668 | if inst.errno != errno.ENOENT: | |
1660 | self.ui.warn(_("update failed to remove %s: %s!\n") % |
|
1669 | self.ui.warn(_("update failed to remove %s: %s!\n") % | |
1661 | (f, inst.strerror)) |
|
1670 | (f, inst.strerror)) | |
1662 | if moddirstate: |
|
1671 | if moddirstate: | |
1663 | if branch_merge: |
|
1672 | if branch_merge: | |
1664 | self.dirstate.update(remove, 'r') |
|
1673 | self.dirstate.update(remove, 'r') | |
1665 | else: |
|
1674 | else: | |
1666 | self.dirstate.forget(remove) |
|
1675 | self.dirstate.forget(remove) | |
1667 |
|
1676 | |||
1668 | if moddirstate: |
|
1677 | if moddirstate: | |
1669 | self.dirstate.setparents(p1, p2) |
|
1678 | self.dirstate.setparents(p1, p2) | |
1670 | return err |
|
1679 | return err | |
1671 |
|
1680 | |||
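    # Sketch of the three buckets the code above has just finished filling
    # (names taken from the surrounding method): "get" maps files to the
    # remote node to check out, "merge" holds (my, other, flag) tuples for
    # three-way merges, and "remove" lists files to delete.  A hypothetical
    # caller might look like:
    #
    #   repo = localrepository(ui, "/path/to/repo")
    #   repo.update(node, allow=1)
    #
    # (update()'s full signature is defined earlier in this file; only the
    # allow/force/choose/moddirstate flag names used in this hunk are
    # assumed here.)
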
    def merge3(self, fn, my, other):
        """perform a 3-way merge in the working directory"""

        def temp(prefix, node):
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            self.wwrite(fn, fl.read(node), f)
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note(_("resolving %s\n") % fn)
        self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
                      (fn, short(my), short(other), short(base)))

        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
        if r:
            self.ui.warn(_("merging %s failed!\n") % fn)

        os.unlink(b)
        os.unlink(c)
        return r

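    # A minimal sketch of how merge3() is driven from the outside: the merge
    # program receives the (local, base, other) paths and its exit status
    # decides success.  Per the lookup order above, a merge tool could be
    # supplied via the environment, e.g.:
    #
    #   HGMERGE="merge" hg update -m tip
    #
    # or per repository in .hg/hgrc:
    #
    #   [ui]
    #   merge = hgmerge
    #
    # HGMERGE takes precedence over ui.merge, with "hgmerge" as the final
    # fallback.
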
    def verify(self):
        filelinkrevs = {}
        filenodes = {}
        changesets = revisions = files = 0
        errors = [0]
        neededmanifests = {}

        def err(msg):
            self.ui.warn(msg + "\n")
            errors[0] += 1

        def checksize(obj, name):
            d = obj.checksize()
            if d[0]:
                err(_("%s data length off by %d bytes") % (name, d[0]))
            if d[1]:
                err(_("%s index contains %d extra bytes") % (name, d[1]))

        seen = {}
        self.ui.status(_("checking changesets\n"))
        checksize(self.changelog, "changelog")

        for i in range(self.changelog.count()):
            changesets += 1
            n = self.changelog.node(i)
            l = self.changelog.linkrev(n)
            if l != i:
                err(_("incorrect link (%d) for changeset revision %d") % (l, i))
            if n in seen:
                err(_("duplicate changeset at revision %d") % i)
            seen[n] = 1

            for p in self.changelog.parents(n):
                if p not in self.changelog.nodemap:
                    err(_("changeset %s has unknown parent %s") %
                        (short(n), short(p)))
            try:
                changes = self.changelog.read(n)
            except KeyboardInterrupt:
                self.ui.warn(_("interrupted"))
                raise
            except Exception, inst:
                err(_("unpacking changeset %s: %s") % (short(n), inst))

            neededmanifests[changes[0]] = n

            for f in changes[3]:
                filelinkrevs.setdefault(f, []).append(i)

        seen = {}
        self.ui.status(_("checking manifests\n"))
        checksize(self.manifest, "manifest")

        for i in range(self.manifest.count()):
            n = self.manifest.node(i)
            l = self.manifest.linkrev(n)

            if l < 0 or l >= self.changelog.count():
                err(_("bad manifest link (%d) at revision %d") % (l, i))

            if n in neededmanifests:
                del neededmanifests[n]

            if n in seen:
                err(_("duplicate manifest at revision %d") % i)

            seen[n] = 1

            for p in self.manifest.parents(n):
                if p not in self.manifest.nodemap:
                    err(_("manifest %s has unknown parent %s") %
                        (short(n), short(p)))

            try:
                delta = mdiff.patchtext(self.manifest.delta(n))
            except KeyboardInterrupt:
                self.ui.warn(_("interrupted"))
                raise
            except Exception, inst:
                err(_("unpacking manifest %s: %s") % (short(n), inst))

            ff = [l.split('\0') for l in delta.splitlines()]
            for f, fn in ff:
                filenodes.setdefault(f, {})[bin(fn[:40])] = 1

        self.ui.status(_("crosschecking files in changesets and manifests\n"))

        for m, c in neededmanifests.items():
            err(_("Changeset %s refers to unknown manifest %s") %
                (short(m), short(c)))
        del neededmanifests

        for f in filenodes:
            if f not in filelinkrevs:
                err(_("file %s in manifest but not in changesets") % f)

        for f in filelinkrevs:
            if f not in filenodes:
                err(_("file %s in changeset but not in manifest") % f)

        self.ui.status(_("checking files\n"))
        ff = filenodes.keys()
        ff.sort()
        for f in ff:
            if f == "/dev/null":
                continue
            files += 1
            fl = self.file(f)
            checksize(fl, f)

            nodes = {nullid: 1}
            seen = {}
            for i in range(fl.count()):
                revisions += 1
                n = fl.node(i)

                if n in seen:
                    err(_("%s: duplicate revision %d") % (f, i))
                if n not in filenodes[f]:
                    err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
                else:
                    del filenodes[f][n]

                flr = fl.linkrev(n)
                if flr not in filelinkrevs[f]:
                    err(_("%s:%s points to unexpected changeset %d")
                        % (f, short(n), flr))
                else:
                    filelinkrevs[f].remove(flr)

                # verify contents
                try:
                    t = fl.read(n)
                except KeyboardInterrupt:
                    self.ui.warn(_("interrupted"))
                    raise
                except Exception, inst:
                    err(_("unpacking file %s %s: %s") % (f, short(n), inst))

                # verify parents
                (p1, p2) = fl.parents(n)
                if p1 not in nodes:
                    err(_("file %s:%s unknown parent 1 %s") %
                        (f, short(n), short(p1)))
                if p2 not in nodes:
                    err(_("file %s:%s unknown parent 2 %s") %
                        (f, short(n), short(p2)))
                nodes[n] = 1

            # cross-check
            for node in filenodes[f]:
                err(_("node %s in manifests not in %s") % (hex(node), f))

        self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
                       (files, changesets, revisions))

        if errors[0]:
            self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
            return 1
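    # Hypothetical verify() session (status lines are the ones emitted by
    # the method above; the counts are made up for illustration):
    #
    #   >>> repo = localrepository(ui, "/path/to/repo")
    #   >>> repo.verify()
    #   checking changesets
    #   checking manifests
    #   crosschecking files in changesets and manifests
    #   checking files
    #   2 files, 3 changesets, 4 total revisions
    #
    # A return value of 1 means integrity errors were reported.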
@@ -1,864 +1,882 b''
1 | """ |
|
1 | """ | |
2 | revlog.py - storage back-end for mercurial |
|
2 | revlog.py - storage back-end for mercurial | |
3 |
|
3 | |||
4 | This provides efficient delta storage with O(1) retrieve and append |
|
4 | This provides efficient delta storage with O(1) retrieve and append | |
5 | and O(changes) merge between branches |
|
5 | and O(changes) merge between branches | |
6 |
|
6 | |||
7 | Copyright 2005 Matt Mackall <mpm@selenic.com> |
|
7 | Copyright 2005 Matt Mackall <mpm@selenic.com> | |
8 |
|
8 | |||
9 | This software may be used and distributed according to the terms |
|
9 | This software may be used and distributed according to the terms | |
10 | of the GNU General Public License, incorporated herein by reference. |
|
10 | of the GNU General Public License, incorporated herein by reference. | |
11 | """ |
|
11 | """ | |
12 |
|
12 | |||
13 | from node import * |
|
13 | from node import * | |
14 | from i18n import gettext as _ |
|
14 | from i18n import gettext as _ | |
15 | from demandload import demandload |
|
15 | from demandload import demandload | |
16 | demandload(globals(), "binascii errno heapq mdiff sha struct zlib") |
|
16 | demandload(globals(), "binascii errno heapq mdiff os sha struct zlib") | |
17 |
|
17 | |||
18 | def hash(text, p1, p2): |
|
18 | def hash(text, p1, p2): | |
19 | """generate a hash from the given text and its parent hashes |
|
19 | """generate a hash from the given text and its parent hashes | |
20 |
|
20 | |||
21 | This hash combines both the current file contents and its history |
|
21 | This hash combines both the current file contents and its history | |
22 | in a manner that makes it easy to distinguish nodes with the same |
|
22 | in a manner that makes it easy to distinguish nodes with the same | |
23 | content in the revision graph. |
|
23 | content in the revision graph. | |
24 | """ |
|
24 | """ | |
25 | l = [p1, p2] |
|
25 | l = [p1, p2] | |
26 | l.sort() |
|
26 | l.sort() | |
27 | s = sha.new(l[0]) |
|
27 | s = sha.new(l[0]) | |
28 | s.update(l[1]) |
|
28 | s.update(l[1]) | |
29 | s.update(text) |
|
29 | s.update(text) | |
30 | return s.digest() |
|
30 | return s.digest() | |
31 |
|
31 | |||
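# A small sketch of the node hash above: the two parent hashes are sorted
# before digesting, so a nodeid does not depend on parent order.  For a
# root revision both parents are nullid (20 zero bytes, from node.py):
#
#   n1 = hash("some text", nullid, nullid)
#   assert len(n1) == 20                       # a raw sha-1 digest
#   assert hash("more", n1, nullid) == hash("more", nullid, n1)
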
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text: return ("", text)
    if len(text) < 44:
        if text[0] == '\0': return ("", text)
        return ('u', text)
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise RevlogError(_("unknown compression type %s") % t)

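# compress()/decompress() round-trip sketch: the first byte of the stored
# data selects the encoding ('u' = literal text, 'x' = a zlib stream,
# leading '\0' = raw data stored as-is):
#
#   h, data = compress("x" * 1000)
#   assert decompress(h + data) == "x" * 1000   # zlib-compressed ('x')
#   h, data = compress("tiny")
#   assert decompress(h + data) == "tiny"       # stored as 'u' + text
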
indexformat = ">4l20s20s20s"

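# indexformat sketch: ">4l20s20s20s" is big-endian -- four 32-bit ints
# (offset, size, base, linkrev) followed by three 20-byte hashes
# (p1, p2, nodeid), 76 bytes per index entry:
#
#   assert struct.calcsize(indexformat) == 76
#   e = struct.pack(indexformat, 0, 0, 0, 0, nullid, nullid, "\0" * 20)
#   assert struct.unpack(indexformat, e)[6] == "\0" * 20
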
class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices

    By default we parse and load 1000 entries at a time.

    If no position is specified, we load the whole index, and replace
    the lazy objects in revlog with the underlying objects for
    efficiency in cases where we look at most of the nodes.
    """
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data)/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def trunc(self, pos):
        self.l = pos/self.s

    def load(self, pos=None):
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            self.map[e[6]] = i
            i += 1

class lazyindex(object):
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        if pos < 0:
            pos += len(self.p.index)
        self.p.load(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        return self.p.index[pos] or self.load(pos)
    def __delitem__(self, pos):
        del self.p.index[pos]
    def append(self, e):
        self.p.index.append(e)
    def trunc(self, pos):
        self.p.trunc(pos)

class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        if self.p.all: return
        n = self.p.data.find(key)
        if n < 0:
            raise KeyError(key)
        pos = n / self.p.s
        self.p.load(pos)
    def __contains__(self, key):
        self.p.load()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                self.p.load(i)
                yield self.p.index[i][6]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]

class RevlogError(Exception): pass

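# Sketch of the lazy-loading behaviour: lazyindex and lazymap share one
# lazyparser, so touching a single revision only parses the 1000-entry
# block around it (see lazyparser.load above).  Names here are
# hypothetical -- indexdata is raw index file contents, rl an owning revlog:
#
#   parser = lazyparser(indexdata, rl)
#   idx = lazyindex(parser)
#   e = idx[5]          # parses entries 0..999, returns the tuple for rev 5
#   nm = lazymap(parser)
#   rev = nm[e[6]]      # nodeid -> rev, via substring search plus load()
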
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile, datafile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener

        self.indexstat = None
        self.cache = None
        self.chunkcache = None
        self.load()

    def load(self):
        try:
            f = self.opener(self.indexfile)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            i = ""
        else:
            try:
                st = os.fstat(f.fileno())
            except AttributeError, inst:
                st = None
            else:
                oldst = self.indexstat
                if (oldst and st.st_dev == oldst.st_dev
                    and st.st_ino == oldst.st_ino
                    and st.st_mtime == oldst.st_mtime
                    and st.st_ctime == oldst.st_ctime):
                    return
                self.indexstat = st
            i = f.read()

        if i and i[:4] != "\0\0\0\0":
            raise RevlogError(_("incompatible revlog signature on %s") %
                              self.indexfile)

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, l * s, s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1

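    # Index-entry layout sketch, matching the unpacking loop above: each
    # self.index[r] is the 7-tuple unpacked from indexformat,
    #
    #   offset, size, base, linkrev, p1, p2, node = self.index[r]
    #
    # and self.nodemap inverts the node field back to r.  load() re-reads
    # the index only when fstat() shows the file changed on disk, which is
    # what the indexstat bookkeeping added in this revision is for.
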
    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node):
        try:
            return self.nodemap[node]
        except KeyError:
            raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
    def linkrev(self, node): return self.index[self.rev(node)][3]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        return self.index[self.rev(node)][4:6]

    def start(self, rev): return self.index[rev][0]
    def length(self, rev): return self.index[rev][1]
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]

    def reachable(self, rev, stop=None):
        reachable = {}
        visit = [rev]
        reachable[rev] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable

    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list of bases and heads after all the unreachable ones have
        been pruned. Element 0 contains a topologically sorted list of all
        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
           roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
           heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = -1
        if (lowestrev == -1) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in xrange(0, self.count())],
                    [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = self.count() - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads[:]
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > -1:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = -1
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents.  (We seeded the descendents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

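    # nodesbetween() usage sketch (rl, a and d are hypothetical names for an
    # open revlog and two of its nodeids):
    #
    #   between, bases, outheads = rl.nodesbetween(roots=[a], heads=[d])
    #
    # `between` is ordered by revision number, i.e. topologically, so
    # ancestors always precede their descendants; `bases` and `outheads` are
    # the surviving roots and the heads actually reached from them.
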
    def heads(self, start=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        """
        if start is None:
            start = nullid
        reachable = {start: 1}
        heads = {start: 1}
        startrev = self.rev(start)

        for r in xrange(startrev + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn in reachable:
                    reachable[n] = 1
                    heads[n] = 1
                if pn in heads:
                    del heads[pn]
        return heads.keys()

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

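    # heads()/children() sketch: both walk forward through the index and use
    # parents() to link revisions, so each is O(revisions).  For a linear
    # history n0 -> n1 -> n2 (rl and n0..n2 are hypothetical names for an
    # open revlog and its nodeids):
    #
    #   rl.heads()          ==> [n2]
    #   rl.heads(start=n1)  ==> [n2]
    #   rl.children(n0)     ==> [n1]
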
    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
            if len(c) < 1: raise RevlogError(_("No match found"))
            return c[0]

        return None

    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

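    # lookup() resolution sketch: integer strings (including negatives) are
    # tried as revision numbers first, anything else as a unique hex-nodeid
    # prefix (rl is a hypothetical open revlog):
    #
    #   rl.lookup("0")                  ==> rl.node(0)
    #   rl.lookup("-1")                 ==> rl.tip()
    #   rl.lookup(hex(rl.tip())[:8])    ==> rl.tip()   # if unambiguous
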
481 | def chunk(self, rev): |
|
499 | def chunk(self, rev): | |
482 | start, length = self.start(rev), self.length(rev) |
|
500 | start, length = self.start(rev), self.length(rev) | |
483 | end = start + length |
|
501 | end = start + length | |
484 |
|
502 | |||
485 | def loadcache(): |
|
503 | def loadcache(): | |
486 | cache_length = max(4096 * 1024, length) # 4Mo |
|
504 | cache_length = max(4096 * 1024, length) # 4Mo | |
487 | df = self.opener(self.datafile) |
|
505 | df = self.opener(self.datafile) | |
488 | df.seek(start) |
|
506 | df.seek(start) | |
489 | self.chunkcache = (start, df.read(cache_length)) |
|
507 | self.chunkcache = (start, df.read(cache_length)) | |
490 |
|
508 | |||
491 | if not self.chunkcache: |
|
509 | if not self.chunkcache: | |
492 | loadcache() |
|
510 | loadcache() | |
493 |
|
511 | |||
494 | cache_start = self.chunkcache[0] |
|
512 | cache_start = self.chunkcache[0] | |
495 | cache_end = cache_start + len(self.chunkcache[1]) |
|
513 | cache_end = cache_start + len(self.chunkcache[1]) | |
496 | if start >= cache_start and end <= cache_end: |
|
514 | if start >= cache_start and end <= cache_end: | |
497 | # it is cached |
|
515 | # it is cached | |
498 | offset = start - cache_start |
|
516 | offset = start - cache_start | |
499 | else: |
|
517 | else: | |
500 | loadcache() |
|
518 | loadcache() | |
501 | offset = 0 |
|
519 | offset = 0 | |
502 |
|
520 | |||
503 | #def checkchunk(): |
|
521 | #def checkchunk(): | |
504 | # df = self.opener(self.datafile) |
|
522 | # df = self.opener(self.datafile) | |
505 | # df.seek(start) |
|
523 | # df.seek(start) | |
506 | # return df.read(length) |
|
524 | # return df.read(length) | |
507 | #assert s == checkchunk() |
|
525 | #assert s == checkchunk() | |
508 | return decompress(self.chunkcache[1][offset:offset + length]) |
|
526 | return decompress(self.chunkcache[1][offset:offset + length]) | |
509 |
|
527 | |||
    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        b = self.base(r)
        if r == b:
            return self.diff(self.revision(self.node(r - 1)),
                             self.revision(node))
        else:
            return self.chunk(r)
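    # Illustrative note (editor's addition): when r == base(r), rev r is
    # stored as a full snapshot, so no on-disk delta against r - 1 exists
    # and one has to be computed on the fly:
    #
    #   self.diff(self.revision(self.node(r - 1)), self.revision(node))
    #
    # Otherwise chunk(r) already holds exactly that delta.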
    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.chunk(base)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r))

        text = mdiff.patches(text, bins)

        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text
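    # Illustrative sketch (editor's note): reconstructing rev 7 when its
    # delta chain bottoms out at a full snapshot stored for rev 4
    # (i.e. base(7) == 4):
    #
    #   text = self.chunk(4)                       # full text of rev 4
    #   bins = [self.chunk(r) for r in (5, 6, 7)]  # three deltas
    #   text = mdiff.patches(text, bins)           # replay in order
    #
    # The final hash(text, p1, p2) comparison catches both storage
    # corruption and a delta applied against the wrong base.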
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        e = (offset, l, base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        transaction.add(self.datafile, e[0])
        f = self.opener(self.datafile, "a")
        if data[0]:
            f.write(data[0])
        f.write(data[1])
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node
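    # Illustrative sketch (editor's note): the "dist > len(text) * 2"
    # heuristic above bounds reconstruction cost. Suppose the current
    # snapshot chain spans bytes 0..9000 on disk and the new compressed
    # delta is 500 bytes (hypothetical numbers):
    #
    #   dist = 9000 - 0 + 500    # 9500 bytes read when replaying the chain
    #   len(text) = 4000         # size of the new full text
    #   9500 > 2 * 4000          # -> store a fresh full version instead
    #
    # so reconstructing any revision never costs more than roughly twice
    # its own uncompressed size in chained reads.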
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        def generations(node):
            sg, s = None, {}
            for g, n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n: 1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [n for n in gx[1] if n in gy[1]]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()
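    # Illustrative sketch (editor's note): on a small DAG where node m
    # merges branches that split at s,
    #
    #   root -> s -> a -> m
    #            \-> b --/
    #
    # dist is {root: 1, s: 2, a: 3, b: 3, m: 4}. Walking generations(a)
    # and generations(b) from the deepest level upward, the first level
    # whose node sets intersect is {s}, so ancestor(a, b) returns s.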
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent, as it has all history before these
        changesets. The parent used is parent[0].
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            na = self.node(a)
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            # do we need to construct a new delta?
            if a + 1 != b or self.base(b) == b:
                ta = self.revision(na)
                tb = self.revision(nb)
                d = self.diff(ta, tb)
            else:
                d = self.chunk(b)

            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        yield struct.pack(">l", 0)
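    # Illustrative sketch (editor's note): each chunk yielded above is
    # framed on the wire as
    #
    #   length   4 bytes, big-endian ">l"; counts itself, so the payload
    #            that follows is (length - 4) bytes
    #   meta     80 bytes: node + p1 + p2 + linked changeset node
    #   delta    (length - 84) bytes of uncompressed delta data
    #
    # and a 4-byte zero length terminates the group. This is exactly the
    # framing that addgroup() below unpacks with "20s20s20s20s".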
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

        base = prev = -1
        start = end = measure = 0
        if r:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            measure = self.length(base)
            prev = self.tip()

        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if chain not in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                measure = len(text)
            else:
                e = (end, len(cdelta), base, link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node
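    # Illustrative sketch (editor's note): the fast path above only
    # applies when the incoming delta's base (chain) is the revision we
    # just wrote (prev), so the delta can be appended to the existing
    # chain verbatim. Otherwise the delta is materialized:
    #
    #   text = self.revision(chain)          # rebuild the delta's base
    #   text = self.patches(text, [delta])   # apply the incoming delta
    #   self.addrevision(text, ...)          # re-delta against our tip
    #
    # at the cost of one full reconstruction per chain switch.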
    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][3]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        self.opener(self.datafile, "a").truncate(end)
        end = rev * struct.calcsize(indexformat)
        self.opener(self.indexfile, "a").truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for p in self.index[rev:]:
            del self.nodemap[p[6]]
        del self.index[rev:]

        # truncating the lazyindex also truncates the lazymap.
        if isinstance(self.index, lazyindex):
            self.index.trunc(end)
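    # Illustrative sketch (editor's note): stripping from rev 10 in a
    # revlog whose index records are fixed-size (struct.calcsize of
    # indexformat; 76 bytes is assumed here for illustration) truncates
    # both files at computed offsets:
    #
    #   self.start(10)    # data offset where rev 10's chunk begins
    #   10 * 76           # index offset of rev 10's entry
    #
    # Everything at or past those offsets is discarded, and nodemap
    # entries for revs >= 10 are dropped to match.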
    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(indexformat)
            i = actual / s
            di = actual - (i * s)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
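    # Illustrative sketch (editor's note): interpreting the (dd, di)
    # pair returned above:
    #
    #   dd == 0 and di == 0   # data and index files are consistent
    #   dd != 0               # data file is longer (or shorter) than
    #                         # the last index entry says it should be
    #   di != 0               # index size is not a whole number of
    #                         # records, i.e. a partial last entry
    #
    # A verify pass can report both conditions without reconstructing
    # any revision text.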