@@ -1,452 +1,459 @@
 # remotefilelog.py - filelog implementation where filelog history is stored
 # remotely
 #
 # Copyright 2013 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import

 import collections
 import os

-from mercurial.node import bin, nullid
+from mercurial.node import (
+    bin,
+    nullid,
+    wdirfilenodeids,
+    wdirid,
+)
 from mercurial.i18n import _
 from mercurial import (
     ancestor,
     error,
     mdiff,
     revlog,
 )
 from mercurial.utils import storageutil

 from . import (
     constants,
     fileserverclient,
     shallowutil,
 )

 class remotefilelognodemap(object):
     def __init__(self, filename, store):
         self._filename = filename
         self._store = store

     def __contains__(self, node):
         missing = self._store.getmissing([(self._filename, node)])
         return not bool(missing)

     def __get__(self, node):
         if node not in self:
             raise KeyError(node)
         return node

 class remotefilelog(object):

     _generaldelta = True

     def __init__(self, opener, path, repo):
         self.opener = opener
         self.filename = path
         self.repo = repo
         self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)

         self.version = 1

     def read(self, node):
         """returns the file contents at this node"""
         t = self.revision(node)
         if not t.startswith('\1\n'):
             return t
         s = t.index('\1\n', 2)
         return t[s + 2:]

     def add(self, text, meta, transaction, linknode, p1=None, p2=None):
         # hash with the metadata, like in vanilla filelogs
         hashtext = shallowutil.createrevlogtext(text, meta.get('copy'),
                                                 meta.get('copyrev'))
         node = storageutil.hashrevisionsha1(hashtext, p1, p2)
         return self.addrevision(hashtext, transaction, linknode, p1, p2,
                                 node=node)

     def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
         # text passed to "_createfileblob" does not include filelog metadata
         header = shallowutil.buildfileblobheader(len(text), flags)
         data = "%s\0%s" % (header, text)

         realp1 = p1
         copyfrom = ""
         if meta and 'copy' in meta:
             copyfrom = meta['copy']
             realp1 = bin(meta['copyrev'])

         data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)

         visited = set()

         pancestors = {}
         queue = []
         if realp1 != nullid:
             p1flog = self
             if copyfrom:
                 p1flog = remotefilelog(self.opener, copyfrom, self.repo)

             pancestors.update(p1flog.ancestormap(realp1))
             queue.append(realp1)
             visited.add(realp1)
         if p2 != nullid:
             pancestors.update(self.ancestormap(p2))
             queue.append(p2)
             visited.add(p2)

         ancestortext = ""

         # add the ancestors in topological order
         while queue:
             c = queue.pop(0)
             pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]

             pacopyfrom = pacopyfrom or ''
             ancestortext += "%s%s%s%s%s\0" % (
                 c, pa1, pa2, ancestorlinknode, pacopyfrom)

             if pa1 != nullid and pa1 not in visited:
                 queue.append(pa1)
                 visited.add(pa1)
             if pa2 != nullid and pa2 not in visited:
                 queue.append(pa2)
                 visited.add(pa2)

         data += ancestortext

         return data

     def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None,
                     node=None, flags=revlog.REVIDX_DEFAULT_FLAGS):
         # text passed to "addrevision" includes hg filelog metadata header
         if node is None:
             node = storageutil.hashrevisionsha1(text, p1, p2)

         meta, metaoffset = storageutil.parsemeta(text)
         rawtext, validatehash = self._processflags(text, flags, 'write')
         return self.addrawrevision(rawtext, transaction, linknode, p1, p2,
                                    node, flags, cachedelta,
                                    _metatuple=(meta, metaoffset))

     def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node,
                        flags, cachedelta=None, _metatuple=None):
         if _metatuple:
             # _metatuple: used by "addrevision" internally by remotefilelog
             # meta was parsed confidently
             meta, metaoffset = _metatuple
         else:
             # not from self.addrevision, but something else (repo._filecommit)
             # calls addrawrevision directly. remotefilelog needs to get and
             # strip filelog metadata.
             # we don't have confidence about whether rawtext contains filelog
             # metadata or not (flag processor could replace it), so we just
             # parse it as best-effort.
             # in LFS (flags != 0)'s case, the best way is to call LFS code to
             # get the meta information, instead of storageutil.parsemeta.
             meta, metaoffset = storageutil.parsemeta(rawtext)
         if flags != 0:
             # when flags != 0, be conservative and do not mangle rawtext, since
             # a read flag processor expects the text not being mangled at all.
             metaoffset = 0
         if metaoffset:
             # remotefilelog fileblob stores copy metadata in its ancestortext,
             # not its main blob. so we need to remove filelog metadata
             # (containing copy information) from text.
             blobtext = rawtext[metaoffset:]
         else:
             blobtext = rawtext
         data = self._createfileblob(blobtext, meta, flags, p1, p2, node,
                                     linknode)
         self.repo.contentstore.addremotefilelognode(self.filename, node, data)

         return node

     def renamed(self, node):
         ancestors = self.repo.metadatastore.getancestors(self.filename, node)
         p1, p2, linknode, copyfrom = ancestors[node]
         if copyfrom:
             return (copyfrom, p1)

         return False

     def size(self, node):
         """return the size of a given revision"""
         return len(self.read(node))

     rawsize = size

     def cmp(self, node, text):
         """compare text with a given file revision

         returns True if text is different than what is stored.
         """

         if node == nullid:
             return True

         nodetext = self.read(node)
         return nodetext != text

     def __nonzero__(self):
         return True

     __bool__ = __nonzero__

     def __len__(self):
         if self.filename == '.hgtags':
             # The length of .hgtags is used to fast path tag checking.
             # remotefilelog doesn't support .hgtags since the entire .hgtags
             # history is needed. Use the excludepattern setting to make
             # .hgtags a normal filelog.
             return 0

         raise RuntimeError("len not supported")

     def empty(self):
         return False

     def flags(self, node):
         if isinstance(node, int):
             raise error.ProgrammingError(
                 'remotefilelog does not accept integer rev for flags')
         store = self.repo.contentstore
         return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)

     def parents(self, node):
         if node == nullid:
             return nullid, nullid

         ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
         p1, p2, linknode, copyfrom = ancestormap[node]
         if copyfrom:
             p1 = nullid

         return p1, p2

     def parentrevs(self, rev):
         # TODO(augie): this is a node and should be a rev, but for now
         # nothing in core seems to actually break.
         return self.parents(rev)

     def linknode(self, node):
         ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
         p1, p2, linknode, copyfrom = ancestormap[node]
         return linknode

     def linkrev(self, node):
         return self.repo.unfiltered().changelog.rev(self.linknode(node))

     def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                       assumehaveparentrevisions=False, deltaprevious=False,
                       deltamode=None):
         # we don't use any of these parameters here
         del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
         del deltamode
         prevnode = None
         for node in nodes:
             p1, p2 = self.parents(node)
             if prevnode is None:
                 basenode = prevnode = p1
             if basenode == node:
                 basenode = nullid
             if basenode != nullid:
                 revision = None
                 delta = self.revdiff(basenode, node)
             else:
                 revision = self.revision(node, raw=True)
                 delta = None
             yield revlog.revlogrevisiondelta(
                 node=node,
                 p1node=p1,
                 p2node=p2,
                 linknode=self.linknode(node),
                 basenode=basenode,
                 flags=self.flags(node),
                 baserevisionsize=None,
                 revision=revision,
                 delta=delta,
                 )

     def revdiff(self, node1, node2):
         return mdiff.textdiff(self.revision(node1, raw=True),
                               self.revision(node2, raw=True))

     def lookup(self, node):
         if len(node) == 40:
             node = bin(node)
         if len(node) != 20:
             raise error.LookupError(node, self.filename,
                                     _('invalid lookup input'))

         return node

     def rev(self, node):
         # This is a hack to make TortoiseHG work.
         return node

     def node(self, rev):
         # This is a hack.
         if isinstance(rev, int):
             raise error.ProgrammingError(
                 'remotefilelog does not convert integer rev to node')
         return rev

     def revision(self, node, raw=False):
         """returns the revlog contents at this node.
         this includes the meta data traditionally included in file revlogs.
         this is generally only used for bundling and communicating with vanilla
         hg clients.
         """
         if node == nullid:
             return ""
         if len(node) != 20:
             raise error.LookupError(node, self.filename,
                                     _('invalid revision input'))
+        if node == wdirid or node in wdirfilenodeids:
+            raise error.WdirUnsupported

         store = self.repo.contentstore
         rawtext = store.get(self.filename, node)
         if raw:
             return rawtext
         flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
         if flags == 0:
             return rawtext
         text, verifyhash = self._processflags(rawtext, flags, 'read')
         return text

     def _processflags(self, text, flags, operation, raw=False):
         # mostly copied from hg/mercurial/revlog.py
         validatehash = True
         orderedflags = revlog.REVIDX_FLAGS_ORDER
         if operation == 'write':
             orderedflags = reversed(orderedflags)
         for flag in orderedflags:
             if flag & flags:
                 vhash = True
                 if flag not in revlog._flagprocessors:
                     message = _("missing processor for flag '%#x'") % (flag)
                     raise revlog.RevlogError(message)
                 readfunc, writefunc, rawfunc = revlog._flagprocessors[flag]
                 if raw:
                     vhash = rawfunc(self, text)
                 elif operation == 'read':
                     text, vhash = readfunc(self, text)
                 elif operation == 'write':
                     text, vhash = writefunc(self, text)
                 validatehash = validatehash and vhash
         return text, validatehash

     def _read(self, id):
         """reads the raw file blob from disk, cache, or server"""
         fileservice = self.repo.fileservice
         localcache = fileservice.localcache
         cachekey = fileserverclient.getcachekey(self.repo.name, self.filename,
                                                 id)
         try:
             return localcache.read(cachekey)
         except KeyError:
             pass

         localkey = fileserverclient.getlocalkey(self.filename, id)
         localpath = os.path.join(self.localpath, localkey)
         try:
             return shallowutil.readfile(localpath)
         except IOError:
             pass

         fileservice.prefetch([(self.filename, id)])
         try:
             return localcache.read(cachekey)
         except KeyError:
             pass

         raise error.LookupError(id, self.filename, _('no node'))

     def ancestormap(self, node):
         return self.repo.metadatastore.getancestors(self.filename, node)

     def ancestor(self, a, b):
         if a == nullid or b == nullid:
             return nullid

         revmap, parentfunc = self._buildrevgraph(a, b)
         nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))

         ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
         if ancs:
             # choose a consistent winner when there's a tie
             return min(map(nodemap.__getitem__, ancs))
         return nullid

     def commonancestorsheads(self, a, b):
         """calculate all the heads of the common ancestors of nodes a and b"""

         if a == nullid or b == nullid:
             return nullid

         revmap, parentfunc = self._buildrevgraph(a, b)
         nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))

         ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
         return map(nodemap.__getitem__, ancs)

     def _buildrevgraph(self, a, b):
         """Builds a numeric revision graph for the given two nodes.
         Returns a node->rev map and a rev->[revs] parent function.
         """
         amap = self.ancestormap(a)
         bmap = self.ancestormap(b)

         # Union the two maps
         parentsmap = collections.defaultdict(list)
         allparents = set()
         for mapping in (amap, bmap):
             for node, pdata in mapping.iteritems():
                 parents = parentsmap[node]
                 p1, p2, linknode, copyfrom = pdata
                 # Don't follow renames (copyfrom).
                 # remotefilectx.ancestor does that.
                 if p1 != nullid and not copyfrom:
                     parents.append(p1)
                     allparents.add(p1)
                 if p2 != nullid:
                     parents.append(p2)
                     allparents.add(p2)

         # Breadth first traversal to build linkrev graph
         parentrevs = collections.defaultdict(list)
         revmap = {}
         queue = collections.deque(((None, n) for n in parentsmap
                                    if n not in allparents))
         while queue:
             prevrev, current = queue.pop()
             if current in revmap:
                 if prevrev:
                     parentrevs[prevrev].append(revmap[current])
                 continue

             # Assign linkrevs in reverse order, so start at
             # len(parentsmap) and work backwards.
             currentrev = len(parentsmap) - len(revmap) - 1
             revmap[current] = currentrev

             if prevrev:
                 parentrevs[prevrev].append(currentrev)

             for parent in parentsmap.get(current):
                 queue.appendleft((currentrev, parent))

         return revmap, parentrevs.__getitem__

     def strip(self, minlink, transaction):
         pass

     # misc unused things
     def files(self):
         return []

     def checksize(self):
         return 0, 0
@@ -1,45 +1,40 @@
 #require no-windows

   $ . "$TESTDIR/remotefilelog-library.sh"

   $ hg init master
   $ cd master
   $ cat >> .hg/hgrc <<EOF
   > [remotefilelog]
   > server=True
   > EOF
   $ echo x > x
   $ hg commit -qAm x
   $ echo y >> x
   $ hg commit -qAm y
   $ echo z >> x
   $ hg commit -qAm z
   $ echo a > a
   $ hg commit -qAm a

   $ cd ..

   $ hgcloneshallow ssh://user@dummy/master shallow -q
   2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
   $ cd shallow

 Test blame

   $ hg blame x
   0: x
   1: y
   2: z
   2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)

 Test grepping the working directory.

   $ hg grep --all-files x
   x:x
-BROKEN: modifications in the wdir tries to fetch from the server.
   $ echo foo >> x
   $ hg grep --all-files x
-  remote: abort: working directory revision cannot be specified
-  1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
-  abort: error downloading file contents:
-  'connection closed early'
-  [255]
+  x:x