--- a/hgext/remotefilelog/basestore.py
+++ b/hgext/remotefilelog/basestore.py
@@ -1,461 +1,461 @@
 from __future__ import absolute_import

 import errno
 import os
 import shutil
 import stat
 import time

 from mercurial.i18n import _
 from mercurial.node import bin, hex
 from mercurial.pycompat import open
 from mercurial import (
     error,
     pycompat,
     util,
 )
 from mercurial.utils import hashutil
 from . import (
     constants,
     shallowutil,
 )


 class basestore(object):
     def __init__(self, repo, path, reponame, shared=False):
         """Creates a remotefilelog store object for the given repo name.

         `path` - The file path where this store keeps its data
         `reponame` - The name of the repo. This is used to partition data from
         many repos.
         `shared` - True if this store is a shared cache of data from the central
         server, for many repos on this machine. False means this store is for
         the local data for one repo.
         """
         self.repo = repo
         self.ui = repo.ui
         self._path = path
         self._reponame = reponame
         self._shared = shared
         self._uid = os.getuid() if not pycompat.iswindows else None

         self._validatecachelog = self.ui.config(
             b"remotefilelog", b"validatecachelog"
         )
         self._validatecache = self.ui.config(
             b"remotefilelog", b"validatecache", b'on'
         )
         if self._validatecache not in (b'on', b'strict', b'off'):
             self._validatecache = b'on'
         if self._validatecache == b'off':
             self._validatecache = False

         if shared:
             shallowutil.mkstickygroupdir(self.ui, path)

     def getmissing(self, keys):
         missing = []
         for name, node in keys:
             filepath = self._getfilepath(name, node)
             exists = os.path.exists(filepath)
             if (
                 exists
                 and self._validatecache == b'strict'
                 and not self._validatekey(filepath, b'contains')
             ):
                 exists = False
             if not exists:
                 missing.append((name, node))

         return missing

     # BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE

     def markledger(self, ledger, options=None):
         if options and options.get(constants.OPTION_PACKSONLY):
             return
         if self._shared:
             for filename, nodes in self._getfiles():
                 for node in nodes:
                     ledger.markdataentry(self, filename, node)
                     ledger.markhistoryentry(self, filename, node)

     def cleanup(self, ledger):
         ui = self.ui
         entries = ledger.sources.get(self, [])
         count = 0
         progress = ui.makeprogress(
             _(b"cleaning up"), unit=b"files", total=len(entries)
         )
         for entry in entries:
             if entry.gced or (entry.datarepacked and entry.historyrepacked):
                 progress.update(count)
                 path = self._getfilepath(entry.filename, entry.node)
                 util.tryunlink(path)
             count += 1
         progress.complete()

         # Clean up the repo cache directory.
         self._cleanupdirectory(self._getrepocachepath())

     # BELOW THIS ARE NON-STANDARD APIS

     def _cleanupdirectory(self, rootdir):
         """Removes the empty directories and unnecessary files within the root
         directory recursively. Note that this method does not remove the root
         directory itself."""

         oldfiles = set()
         otherfiles = set()
         # osutil.listdir returns stat information which saves some rmdir/listdir
         # syscalls.
         for name, mode in util.osutil.listdir(rootdir):
             if stat.S_ISDIR(mode):
                 dirpath = os.path.join(rootdir, name)
                 self._cleanupdirectory(dirpath)

                 # Now that the directory specified by dirpath is potentially
                 # empty, try and remove it.
                 try:
                     os.rmdir(dirpath)
                 except OSError:
                     pass

             elif stat.S_ISREG(mode):
                 if name.endswith(b'_old'):
                     oldfiles.add(name[:-4])
                 else:
                     otherfiles.add(name)

         # Remove the files which end with suffix '_old' and have no
         # corresponding file without the suffix '_old'. See addremotefilelognode
         # method for the generation/purpose of files with '_old' suffix.
         for filename in oldfiles - otherfiles:
             filepath = os.path.join(rootdir, filename + b'_old')
             util.tryunlink(filepath)

     def _getfiles(self):
         """Return a list of (filename, [node,...]) for all the revisions that
         exist in the store.

         This is useful for obtaining a list of all the contents of the store
         when performing a repack to another store, since the store API requires
         name+node keys and not namehash+node keys.
         """
         existing = {}
         for filenamehash, node in self._listkeys():
             existing.setdefault(filenamehash, []).append(node)

         filenamemap = self._resolvefilenames(existing.keys())

         for filename, sha in pycompat.iteritems(filenamemap):
             yield (filename, existing[sha])

     def _resolvefilenames(self, hashes):
         """Given a list of filename hashes that are present in the
         remotefilelog store, return a mapping from filename->hash.

         This is useful when converting remotefilelog blobs into other storage
         formats.
         """
         if not hashes:
             return {}

         filenames = {}
         missingfilename = set(hashes)

         # Start with a full manifest, since it'll cover the majority of files
         for filename in self.repo[b'tip'].manifest():
             sha = hashutil.sha1(filename).digest()
             if sha in missingfilename:
                 filenames[filename] = sha
                 missingfilename.discard(sha)

         # Scan the changelog until we've found every file name
         cl = self.repo.unfiltered().changelog
         for rev in pycompat.xrange(len(cl) - 1, -1, -1):
             if not missingfilename:
                 break
             files = cl.readfiles(cl.node(rev))
             for filename in files:
                 sha = hashutil.sha1(filename).digest()
                 if sha in missingfilename:
                     filenames[filename] = sha
                     missingfilename.discard(sha)

         return filenames

     def _getrepocachepath(self):
         return (
             os.path.join(self._path, self._reponame)
             if self._shared
             else self._path
         )

     def _listkeys(self):
         """List all the remotefilelog keys that exist in the store.

         Returns a iterator of (filename hash, filecontent hash) tuples.
         """

         for root, dirs, files in os.walk(self._getrepocachepath()):
             for filename in files:
                 if len(filename) != 40:
                     continue
                 node = filename
                 if self._shared:
                     # .../1a/85ffda..be21
                     filenamehash = root[-41:-39] + root[-38:]
                 else:
                     filenamehash = root[-40:]
                 yield (bin(filenamehash), bin(node))

     def _getfilepath(self, name, node):
         node = hex(node)
         if self._shared:
             key = shallowutil.getcachekey(self._reponame, name, node)
         else:
             key = shallowutil.getlocalkey(name, node)

         return os.path.join(self._path, key)

     def _getdata(self, name, node):
         filepath = self._getfilepath(name, node)
         try:
             data = shallowutil.readfile(filepath)
             if self._validatecache and not self._validatedata(data, filepath):
                 if self._validatecachelog:
                     with open(self._validatecachelog, b'ab+') as f:
                         f.write(b"corrupt %s during read\n" % filepath)
                 os.rename(filepath, filepath + b".corrupt")
                 raise KeyError(b"corrupt local cache file %s" % filepath)
         except IOError:
             raise KeyError(
                 b"no file found at %s for %s:%s" % (filepath, name, hex(node))
             )

         return data

     def addremotefilelognode(self, name, node, data):
         filepath = self._getfilepath(name, node)

         oldumask = os.umask(0o002)
         try:
             # if this node already exists, save the old version for
             # recovery/debugging purposes.
             if os.path.exists(filepath):
                 newfilename = filepath + b'_old'
                 # newfilename can be read-only and shutil.copy will fail.
                 # Delete newfilename to avoid it
                 if os.path.exists(newfilename):
                     shallowutil.unlinkfile(newfilename)
                 shutil.copy(filepath, newfilename)

             shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath))
             shallowutil.writefile(filepath, data, readonly=True)

             if self._validatecache:
                 if not self._validatekey(filepath, b'write'):
                     raise error.Abort(
                         _(b"local cache write was corrupted %s") % filepath
                     )
         finally:
             os.umask(oldumask)

     def markrepo(self, path):
         """Call this to add the given repo path to the store's list of
         repositories that are using it. This is useful later when doing garbage
         collection, since it allows us to insecpt the repos to see what nodes
         they want to be kept alive in the store.
         """
         repospath = os.path.join(self._path, b"repos")
         with open(repospath, b'ab') as reposfile:
             reposfile.write(os.path.dirname(path) + b"\n")

         repospathstat = os.stat(repospath)
         if repospathstat.st_uid == self._uid:
             os.chmod(repospath, 0o0664)

     def _validatekey(self, path, action):
         with open(path, b'rb') as f:
             data = f.read()

         if self._validatedata(data, path):
             return True

         if self._validatecachelog:
             with open(self._validatecachelog, b'ab+') as f:
                 f.write(b"corrupt %s during %s\n" % (path, action))

         os.rename(path, path + b".corrupt")
         return False

     def _validatedata(self, data, path):
         try:
             if len(data) > 0:
                 # see remotefilelogserver.createfileblob for the format
                 offset, size, flags = shallowutil.parsesizeflags(data)
                 if len(data) <= size:
                     # it is truncated
                     return False

                 # extract the node from the metadata
                 offset += size
                 datanode = data[offset : offset + 20]

                 # and compare against the path
                 if os.path.basename(path) == hex(datanode):
                     # Content matches the intended path
                     return True
                 return False
-        except (ValueError, RuntimeError):
+        except (ValueError, shallowutil.BadRemotefilelogHeader):
             pass

         return False

     def gc(self, keepkeys):
         ui = self.ui
         cachepath = self._path

         # prune cache
         queue = pycompat.queue.PriorityQueue()
         originalsize = 0
         size = 0
         count = 0
         removed = 0

         # keep files newer than a day even if they aren't needed
         limit = time.time() - (60 * 60 * 24)

         progress = ui.makeprogress(
             _(b"removing unnecessary files"), unit=b"files"
         )
         progress.update(0)
         for root, dirs, files in os.walk(cachepath):
             for file in files:
                 if file == b'repos':
                     continue

                 # Don't delete pack files
                 if b'/packs/' in root:
                     continue

                 progress.update(count)
                 path = os.path.join(root, file)
                 key = os.path.relpath(path, cachepath)
                 count += 1
                 try:
                     pathstat = os.stat(path)
                 except OSError as e:
                     # errno.ENOENT = no such file or directory
                     if e.errno != errno.ENOENT:
                         raise
                     msg = _(
                         b"warning: file %s was removed by another process\n"
                     )
                     ui.warn(msg % path)
                     continue

                 originalsize += pathstat.st_size

                 if key in keepkeys or pathstat.st_atime > limit:
                     queue.put((pathstat.st_atime, path, pathstat))
                     size += pathstat.st_size
                 else:
                     try:
                         shallowutil.unlinkfile(path)
                     except OSError as e:
                         # errno.ENOENT = no such file or directory
                         if e.errno != errno.ENOENT:
                             raise
                         msg = _(
                             b"warning: file %s was removed by another "
                             b"process\n"
                         )
                         ui.warn(msg % path)
                         continue
                     removed += 1
         progress.complete()

         # remove oldest files until under limit
         limit = ui.configbytes(b"remotefilelog", b"cachelimit")
         if size > limit:
             excess = size - limit
             progress = ui.makeprogress(
                 _(b"enforcing cache limit"), unit=b"bytes", total=excess
             )
             removedexcess = 0
             while queue and size > limit and size > 0:
                 progress.update(removedexcess)
                 atime, oldpath, oldpathstat = queue.get()
                 try:
                     shallowutil.unlinkfile(oldpath)
                 except OSError as e:
                     # errno.ENOENT = no such file or directory
                     if e.errno != errno.ENOENT:
                         raise
                     msg = _(
                         b"warning: file %s was removed by another process\n"
                     )
                     ui.warn(msg % oldpath)
                 size -= oldpathstat.st_size
                 removed += 1
                 removedexcess += oldpathstat.st_size
             progress.complete()

         ui.status(
             _(b"finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
             % (
                 removed,
                 count,
                 float(originalsize) / 1024.0 / 1024.0 / 1024.0,
                 float(size) / 1024.0 / 1024.0 / 1024.0,
             )
         )


 class baseunionstore(object):
     def __init__(self, *args, **kwargs):
         # If one of the functions that iterates all of the stores is about to
         # throw a KeyError, try this many times with a full refresh between
         # attempts. A repack operation may have moved data from one store to
         # another while we were running.
         self.numattempts = kwargs.get('numretries', 0) + 1
         # If not-None, call this function on every retry and if the attempts are
         # exhausted.
         self.retrylog = kwargs.get('retrylog', None)

     def markforrefresh(self):
         for store in self.stores:
             if util.safehasattr(store, b'markforrefresh'):
                 store.markforrefresh()

     @staticmethod
     def retriable(fn):
         def noop(*args):
             pass

         def wrapped(self, *args, **kwargs):
             retrylog = self.retrylog or noop
             funcname = fn.__name__
             i = 0
             while i < self.numattempts:
                 if i > 0:
                     retrylog(
                         b're-attempting (n=%d) %s\n'
                         % (i, pycompat.sysbytes(funcname))
                     )
                     self.markforrefresh()
                 i += 1
                 try:
                     return fn(self, *args, **kwargs)
                 except KeyError:
                     if i == self.numattempts:
                         # retries exhausted
                         retrylog(
                             b'retries exhausted in %s, raising KeyError\n'
                             % pycompat.sysbytes(funcname)
                         )
                         raise

         return wrapped
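The only behavioral change in basestore.py is the widened `except` clause in `_validatedata`: since `shallowutil.parsesizeflags` now raises `BadRemotefilelogHeader` rather than `RuntimeError`, the validator must catch the new type to keep treating unparsable headers as cache corruption. A minimal standalone sketch of the pattern (all names here are invented for illustration; this is not Mercurial code):

```python
# Sketch: a dedicated header-parse exception lets the validator treat
# "unparsable header" as "corrupt blob" without also swallowing
# unrelated RuntimeErrors raised by genuine bugs elsewhere.

class BadHeader(Exception):
    """Stand-in for shallowutil.BadRemotefilelogHeader."""


def parse_header(blob):
    # simplified v0-style format: b"<decimal size>\0<raw text><metadata>"
    index = blob.index(b'\0')  # ValueError if there is no NUL at all
    try:
        size = int(blob[:index])
    except ValueError:
        raise BadHeader('illegal header: %r' % blob[:index])
    return index + 1, size


def validate(blob):
    try:
        offset, size = parse_header(blob)
        # blobs with nothing after the raw text are truncated, hence invalid
        return len(blob) > offset + size
    except (ValueError, BadHeader):
        return False


assert validate(b'5\x00hello<trailing metadata>')
assert not validate(b'bogus\x00data')    # BadHeader -> treated as corrupt
assert not validate(b'no-nul-anywhere')  # ValueError -> treated as corrupt
```

The payoff is that a stray `RuntimeError` from an unrelated bug is no longer silently classified as cache corruption; only `ValueError` and the dedicated header exception are.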
@@ -1,536 +1,544 b'' | |||||
1 | # shallowutil.py -- remotefilelog utilities |
|
1 | # shallowutil.py -- remotefilelog utilities | |
2 | # |
|
2 | # | |
3 | # Copyright 2014 Facebook, Inc. |
|
3 | # Copyright 2014 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import collections |
|
9 | import collections | |
10 | import errno |
|
10 | import errno | |
11 | import os |
|
11 | import os | |
12 | import stat |
|
12 | import stat | |
13 | import struct |
|
13 | import struct | |
14 | import tempfile |
|
14 | import tempfile | |
15 |
|
15 | |||
16 | from mercurial.i18n import _ |
|
16 | from mercurial.i18n import _ | |
17 | from mercurial.pycompat import open |
|
17 | from mercurial.pycompat import open | |
18 | from mercurial.node import hex |
|
18 | from mercurial.node import hex | |
19 | from mercurial import ( |
|
19 | from mercurial import ( | |
20 | error, |
|
20 | error, | |
21 | pycompat, |
|
21 | pycompat, | |
22 | revlog, |
|
22 | revlog, | |
23 | util, |
|
23 | util, | |
24 | ) |
|
24 | ) | |
25 | from mercurial.utils import ( |
|
25 | from mercurial.utils import ( | |
26 | hashutil, |
|
26 | hashutil, | |
27 | storageutil, |
|
27 | storageutil, | |
28 | stringutil, |
|
28 | stringutil, | |
29 | ) |
|
29 | ) | |
30 | from . import constants |
|
30 | from . import constants | |
31 |
|
31 | |||
32 | if not pycompat.iswindows: |
|
32 | if not pycompat.iswindows: | |
33 | import grp |
|
33 | import grp | |
34 |
|
34 | |||
35 |
|
35 | |||
36 | def isenabled(repo): |
|
36 | def isenabled(repo): | |
37 | """returns whether the repository is remotefilelog enabled or not""" |
|
37 | """returns whether the repository is remotefilelog enabled or not""" | |
38 | return constants.SHALLOWREPO_REQUIREMENT in repo.requirements |
|
38 | return constants.SHALLOWREPO_REQUIREMENT in repo.requirements | |
39 |
|
39 | |||
40 |
|
40 | |||
41 | def getcachekey(reponame, file, id): |
|
41 | def getcachekey(reponame, file, id): | |
42 | pathhash = hex(hashutil.sha1(file).digest()) |
|
42 | pathhash = hex(hashutil.sha1(file).digest()) | |
43 | return os.path.join(reponame, pathhash[:2], pathhash[2:], id) |
|
43 | return os.path.join(reponame, pathhash[:2], pathhash[2:], id) | |
44 |
|
44 | |||
45 |
|
45 | |||
46 | def getlocalkey(file, id): |
|
46 | def getlocalkey(file, id): | |
47 | pathhash = hex(hashutil.sha1(file).digest()) |
|
47 | pathhash = hex(hashutil.sha1(file).digest()) | |
48 | return os.path.join(pathhash, id) |
|
48 | return os.path.join(pathhash, id) | |
49 |
|
49 | |||
50 |
|
50 | |||
51 | def getcachepath(ui, allowempty=False): |
|
51 | def getcachepath(ui, allowempty=False): | |
52 | cachepath = ui.config(b"remotefilelog", b"cachepath") |
|
52 | cachepath = ui.config(b"remotefilelog", b"cachepath") | |
53 | if not cachepath: |
|
53 | if not cachepath: | |
54 | if allowempty: |
|
54 | if allowempty: | |
55 | return None |
|
55 | return None | |
56 | else: |
|
56 | else: | |
57 | raise error.Abort( |
|
57 | raise error.Abort( | |
58 | _(b"could not find config option remotefilelog.cachepath") |
|
58 | _(b"could not find config option remotefilelog.cachepath") | |
59 | ) |
|
59 | ) | |
60 | return util.expandpath(cachepath) |
|
60 | return util.expandpath(cachepath) | |
61 |
|
61 | |||
62 |
|
62 | |||
63 | def getcachepackpath(repo, category): |
|
63 | def getcachepackpath(repo, category): | |
64 | cachepath = getcachepath(repo.ui) |
|
64 | cachepath = getcachepath(repo.ui) | |
65 | if category != constants.FILEPACK_CATEGORY: |
|
65 | if category != constants.FILEPACK_CATEGORY: | |
66 | return os.path.join(cachepath, repo.name, b'packs', category) |
|
66 | return os.path.join(cachepath, repo.name, b'packs', category) | |
67 | else: |
|
67 | else: | |
68 | return os.path.join(cachepath, repo.name, b'packs') |
|
68 | return os.path.join(cachepath, repo.name, b'packs') | |
69 |
|
69 | |||
70 |
|
70 | |||
71 | def getlocalpackpath(base, category): |
|
71 | def getlocalpackpath(base, category): | |
72 | return os.path.join(base, b'packs', category) |
|
72 | return os.path.join(base, b'packs', category) | |
73 |
|
73 | |||
74 |
|
74 | |||
75 | def createrevlogtext(text, copyfrom=None, copyrev=None): |
|
75 | def createrevlogtext(text, copyfrom=None, copyrev=None): | |
76 | """returns a string that matches the revlog contents in a |
|
76 | """returns a string that matches the revlog contents in a | |
77 | traditional revlog |
|
77 | traditional revlog | |
78 | """ |
|
78 | """ | |
79 | meta = {} |
|
79 | meta = {} | |
80 | if copyfrom or text.startswith(b'\1\n'): |
|
80 | if copyfrom or text.startswith(b'\1\n'): | |
81 | if copyfrom: |
|
81 | if copyfrom: | |
82 | meta[b'copy'] = copyfrom |
|
82 | meta[b'copy'] = copyfrom | |
83 | meta[b'copyrev'] = copyrev |
|
83 | meta[b'copyrev'] = copyrev | |
84 | text = storageutil.packmeta(meta, text) |
|
84 | text = storageutil.packmeta(meta, text) | |
85 |
|
85 | |||
86 | return text |
|
86 | return text | |
87 |
|
87 | |||
88 |
|
88 | |||
89 | def parsemeta(text): |
|
89 | def parsemeta(text): | |
90 | """parse mercurial filelog metadata""" |
|
90 | """parse mercurial filelog metadata""" | |
91 | meta, size = storageutil.parsemeta(text) |
|
91 | meta, size = storageutil.parsemeta(text) | |
92 | if text.startswith(b'\1\n'): |
|
92 | if text.startswith(b'\1\n'): | |
93 | s = text.index(b'\1\n', 2) |
|
93 | s = text.index(b'\1\n', 2) | |
94 | text = text[s + 2 :] |
|
94 | text = text[s + 2 :] | |
95 | return meta or {}, text |
|
95 | return meta or {}, text | |
96 |
|
96 | |||
97 |
|
97 | |||
98 | def sumdicts(*dicts): |
|
98 | def sumdicts(*dicts): | |
99 | """Adds all the values of *dicts together into one dictionary. This assumes |
|
99 | """Adds all the values of *dicts together into one dictionary. This assumes | |
100 | the values in *dicts are all summable. |
|
100 | the values in *dicts are all summable. | |
101 |
|
101 | |||
102 | e.g. [{'a': 4', 'b': 2}, {'b': 3, 'c': 1}] -> {'a': 4, 'b': 5, 'c': 1} |
|
102 | e.g. [{'a': 4', 'b': 2}, {'b': 3, 'c': 1}] -> {'a': 4, 'b': 5, 'c': 1} | |
103 | """ |
|
103 | """ | |
104 | result = collections.defaultdict(lambda: 0) |
|
104 | result = collections.defaultdict(lambda: 0) | |
105 | for dict in dicts: |
|
105 | for dict in dicts: | |
106 | for k, v in pycompat.iteritems(dict): |
|
106 | for k, v in pycompat.iteritems(dict): | |
107 | result[k] += v |
|
107 | result[k] += v | |
108 | return result |
|
108 | return result | |
109 |
|
109 | |||
110 |
|
110 | |||
111 | def prefixkeys(dict, prefix): |
|
111 | def prefixkeys(dict, prefix): | |
112 | """Returns ``dict`` with ``prefix`` prepended to all its keys.""" |
|
112 | """Returns ``dict`` with ``prefix`` prepended to all its keys.""" | |
113 | result = {} |
|
113 | result = {} | |
114 | for k, v in pycompat.iteritems(dict): |
|
114 | for k, v in pycompat.iteritems(dict): | |
115 | result[prefix + k] = v |
|
115 | result[prefix + k] = v | |
116 | return result |
|
116 | return result | |
117 |
|
117 | |||
118 |
|
118 | |||
119 | def reportpackmetrics(ui, prefix, *stores): |
|
119 | def reportpackmetrics(ui, prefix, *stores): | |
120 | dicts = [s.getmetrics() for s in stores] |
|
120 | dicts = [s.getmetrics() for s in stores] | |
121 | dict = prefixkeys(sumdicts(*dicts), prefix + b'_') |
|
121 | dict = prefixkeys(sumdicts(*dicts), prefix + b'_') | |
122 | ui.log(prefix + b"_packsizes", b"\n", **pycompat.strkwargs(dict)) |
|
122 | ui.log(prefix + b"_packsizes", b"\n", **pycompat.strkwargs(dict)) | |
123 |
|
123 | |||
124 |
|
124 | |||
125 | def _parsepackmeta(metabuf): |
|
125 | def _parsepackmeta(metabuf): | |
126 | """parse datapack meta, bytes (<metadata-list>) -> dict |
|
126 | """parse datapack meta, bytes (<metadata-list>) -> dict | |
127 |
|
127 | |||
128 | The dict contains raw content - both keys and values are strings. |
|
128 | The dict contains raw content - both keys and values are strings. | |
129 | Upper-level business may want to convert some of them to other types like |
|
129 | Upper-level business may want to convert some of them to other types like | |
130 | integers, on their own. |
|
130 | integers, on their own. | |
131 |
|
131 | |||
132 | raise ValueError if the data is corrupted |
|
132 | raise ValueError if the data is corrupted | |
133 | """ |
|
133 | """ | |
134 | metadict = {} |
|
134 | metadict = {} | |
135 | offset = 0 |
|
135 | offset = 0 | |
136 | buflen = len(metabuf) |
|
136 | buflen = len(metabuf) | |
137 | while buflen - offset >= 3: |
|
137 | while buflen - offset >= 3: | |
138 | key = metabuf[offset : offset + 1] |
|
138 | key = metabuf[offset : offset + 1] | |
139 | offset += 1 |
|
139 | offset += 1 | |
140 | metalen = struct.unpack_from(b'!H', metabuf, offset)[0] |
|
140 | metalen = struct.unpack_from(b'!H', metabuf, offset)[0] | |
141 | offset += 2 |
|
141 | offset += 2 | |
142 | if offset + metalen > buflen: |
|
142 | if offset + metalen > buflen: | |
143 | raise ValueError(b'corrupted metadata: incomplete buffer') |
|
143 | raise ValueError(b'corrupted metadata: incomplete buffer') | |
144 | value = metabuf[offset : offset + metalen] |
|
144 | value = metabuf[offset : offset + metalen] | |
145 | metadict[key] = value |
|
145 | metadict[key] = value | |
146 | offset += metalen |
|
146 | offset += metalen | |
147 | if offset != buflen: |
|
147 | if offset != buflen: | |
148 | raise ValueError(b'corrupted metadata: redundant data') |
|
148 | raise ValueError(b'corrupted metadata: redundant data') | |
149 | return metadict |
|
149 | return metadict | |
150 |
|
150 | |||
151 |
|
151 | |||
152 | def _buildpackmeta(metadict): |
|
152 | def _buildpackmeta(metadict): | |
153 | """reverse of _parsepackmeta, dict -> bytes (<metadata-list>) |
|
153 | """reverse of _parsepackmeta, dict -> bytes (<metadata-list>) | |
154 |
|
154 | |||
155 | The dict contains raw content - both keys and values are strings. |
|
155 | The dict contains raw content - both keys and values are strings. | |
156 | Upper-level business may want to serialize some of other types (like |
|
156 | Upper-level business may want to serialize some of other types (like | |
157 | integers) to strings before calling this function. |
|
157 | integers) to strings before calling this function. | |
158 |
|
158 | |||
159 | raise ProgrammingError when metadata key is illegal, or ValueError if |
|
159 | raise ProgrammingError when metadata key is illegal, or ValueError if | |
160 | length limit is exceeded |
|
160 | length limit is exceeded | |
161 | """ |
|
161 | """ | |
162 | metabuf = b'' |
|
162 | metabuf = b'' | |
163 | for k, v in sorted(pycompat.iteritems((metadict or {}))): |
|
163 | for k, v in sorted(pycompat.iteritems((metadict or {}))): | |
164 | if len(k) != 1: |
|
164 | if len(k) != 1: | |
165 | raise error.ProgrammingError(b'packmeta: illegal key: %s' % k) |
|
165 | raise error.ProgrammingError(b'packmeta: illegal key: %s' % k) | |
166 | if len(v) > 0xFFFE: |
|
166 | if len(v) > 0xFFFE: | |
167 | raise ValueError( |
|
167 | raise ValueError( | |
168 | b'metadata value is too long: 0x%x > 0xfffe' % len(v) |
|
168 | b'metadata value is too long: 0x%x > 0xfffe' % len(v) | |
169 | ) |
|
169 | ) | |
170 | metabuf += k |
|
170 | metabuf += k | |
171 | metabuf += struct.pack(b'!H', len(v)) |
|
171 | metabuf += struct.pack(b'!H', len(v)) | |
172 | metabuf += v |
|
172 | metabuf += v | |
173 | # len(metabuf) is guaranteed representable in 4 bytes, because there are |
|
173 | # len(metabuf) is guaranteed representable in 4 bytes, because there are | |
174 | # only 256 keys, and for each value, len(value) <= 0xfffe. |
|
174 | # only 256 keys, and for each value, len(value) <= 0xfffe. | |
175 | return metabuf |
|
175 | return metabuf | |
176 |
|
176 | |||
177 |
|
177 | |||
178 | _metaitemtypes = { |
|
178 | _metaitemtypes = { | |
179 | constants.METAKEYFLAG: (int, pycompat.long), |
|
179 | constants.METAKEYFLAG: (int, pycompat.long), | |
180 | constants.METAKEYSIZE: (int, pycompat.long), |
|
180 | constants.METAKEYSIZE: (int, pycompat.long), | |
181 | } |
|
181 | } | |
182 |
|
182 | |||
183 |
|
183 | |||
184 | def buildpackmeta(metadict): |
|
184 | def buildpackmeta(metadict): | |
185 | """like _buildpackmeta, but typechecks metadict and normalize it. |
|
185 | """like _buildpackmeta, but typechecks metadict and normalize it. | |
186 |
|
186 | |||
187 | This means, METAKEYSIZE and METAKEYSIZE should have integers as values, |
|
187 | This means, METAKEYSIZE and METAKEYSIZE should have integers as values, | |
188 | and METAKEYFLAG will be dropped if its value is 0. |
|
188 | and METAKEYFLAG will be dropped if its value is 0. | |
189 | """ |
|
189 | """ | |
190 | newmeta = {} |
|
190 | newmeta = {} | |
191 | for k, v in pycompat.iteritems(metadict or {}): |
|
191 | for k, v in pycompat.iteritems(metadict or {}): | |
192 | expectedtype = _metaitemtypes.get(k, (bytes,)) |
|
192 | expectedtype = _metaitemtypes.get(k, (bytes,)) | |
193 | if not isinstance(v, expectedtype): |
|
193 | if not isinstance(v, expectedtype): | |
194 | raise error.ProgrammingError(b'packmeta: wrong type of key %s' % k) |
|
194 | raise error.ProgrammingError(b'packmeta: wrong type of key %s' % k) | |
195 | # normalize int to binary buffer |
|
195 | # normalize int to binary buffer | |
196 | if int in expectedtype: |
|
196 | if int in expectedtype: | |
197 | # optimization: remove flag if it's 0 to save space |
|
197 | # optimization: remove flag if it's 0 to save space | |
198 | if k == constants.METAKEYFLAG and v == 0: |
|
198 | if k == constants.METAKEYFLAG and v == 0: | |
199 | continue |
|
199 | continue | |
200 | v = int2bin(v) |
|
200 | v = int2bin(v) | |
201 | newmeta[k] = v |
|
201 | newmeta[k] = v | |
202 | return _buildpackmeta(newmeta) |
|
202 | return _buildpackmeta(newmeta) | |
203 |
|
203 | |||
204 |
|
204 | |||
205 | def parsepackmeta(metabuf): |
|
205 | def parsepackmeta(metabuf): | |
206 | """like _parsepackmeta, but convert fields to desired types automatically. |
|
206 | """like _parsepackmeta, but convert fields to desired types automatically. | |
207 |
|
207 | |||
208 | This means, METAKEYFLAG and METAKEYSIZE fields will be converted to |
|
208 | This means, METAKEYFLAG and METAKEYSIZE fields will be converted to | |
209 | integers. |
|
209 | integers. | |
210 | """ |
|
210 | """ | |
211 | metadict = _parsepackmeta(metabuf) |
|
211 | metadict = _parsepackmeta(metabuf) | |
212 | for k, v in pycompat.iteritems(metadict): |
|
212 | for k, v in pycompat.iteritems(metadict): | |
213 | if k in _metaitemtypes and int in _metaitemtypes[k]: |
|
213 | if k in _metaitemtypes and int in _metaitemtypes[k]: | |
214 | metadict[k] = bin2int(v) |
|
214 | metadict[k] = bin2int(v) | |
215 | return metadict |
|
215 | return metadict | |
216 |
|
216 | |||
217 |
|
217 | |||
218 | def int2bin(n): |
|
218 | def int2bin(n): | |
219 | """convert a non-negative integer to raw binary buffer""" |
|
219 | """convert a non-negative integer to raw binary buffer""" | |
220 | buf = bytearray() |
|
220 | buf = bytearray() | |
221 | while n > 0: |
|
221 | while n > 0: | |
222 | buf.insert(0, n & 0xFF) |
|
222 | buf.insert(0, n & 0xFF) | |
223 | n >>= 8 |
|
223 | n >>= 8 | |
224 | return bytes(buf) |
|
224 | return bytes(buf) | |
225 |
|
225 | |||
226 |
|
226 | |||
227 | def bin2int(buf): |
|
227 | def bin2int(buf): | |
228 | """the reverse of int2bin, convert a binary buffer to an integer""" |
|
228 | """the reverse of int2bin, convert a binary buffer to an integer""" | |
229 | x = 0 |
|
229 | x = 0 | |
230 | for b in bytearray(buf): |
|
230 | for b in bytearray(buf): | |
231 | x <<= 8 |
|
231 | x <<= 8 | |
232 | x |= b |
|
232 | x |= b | |
233 | return x |
|
233 | return x | |
234 |
|
234 | |||
235 |
|
235 | |||
|
236 | class BadRemotefilelogHeader(error.StorageError): | |||
|
237 | """Exception raised when parsing a remotefilelog blob header fails.""" | |||
|
238 | ||||
|
239 | ||||
236 | def parsesizeflags(raw): |
|
240 | def parsesizeflags(raw): | |
237 | """given a remotefilelog blob, return (headersize, rawtextsize, flags) |
|
241 | """given a remotefilelog blob, return (headersize, rawtextsize, flags) | |
238 |
|
242 | |||
239 | see remotefilelogserver.createfileblob for the format. |
|
243 | see remotefilelogserver.createfileblob for the format. | |
240 | raise RuntimeError if the content is illformed. |
|
244 | raise RuntimeError if the content is illformed. | |
241 | """ |
|
245 | """ | |
242 | flags = revlog.REVIDX_DEFAULT_FLAGS |
|
246 | flags = revlog.REVIDX_DEFAULT_FLAGS | |
243 | size = None |
|
247 | size = None | |
244 | try: |
|
248 | try: | |
245 | index = raw.index(b'\0') |
|
249 | index = raw.index(b'\0') | |
246 | header = raw[:index] |
|
250 | header = raw[:index] | |
247 | if header.startswith(b'v'): |
|
251 | if header.startswith(b'v'): | |
248 | # v1 and above, header starts with 'v' |
|
252 | # v1 and above, header starts with 'v' | |
249 | if header.startswith(b'v1\n'): |
|
253 | if header.startswith(b'v1\n'): | |
250 | for s in header.split(b'\n'): |
|
254 | for s in header.split(b'\n'): | |
251 | if s.startswith(constants.METAKEYSIZE): |
|
255 | if s.startswith(constants.METAKEYSIZE): | |
252 | size = int(s[len(constants.METAKEYSIZE) :]) |
|
256 | size = int(s[len(constants.METAKEYSIZE) :]) | |
253 | elif s.startswith(constants.METAKEYFLAG): |
|
257 | elif s.startswith(constants.METAKEYFLAG): | |
254 | flags = int(s[len(constants.METAKEYFLAG) :]) |
|
258 | flags = int(s[len(constants.METAKEYFLAG) :]) | |
255 | else: |
|
259 | else: | |
256 |
raise |
|
260 | raise BadRemotefilelogHeader( | |
257 | b'unsupported remotefilelog header: %s' % header |
|
261 | b'unsupported remotefilelog header: %s' % header | |
258 | ) |
|
262 | ) | |
259 | else: |
|
263 | else: | |
260 | # v0, str(int(size)) is the header |
|
264 | # v0, str(int(size)) is the header | |
261 | size = int(header) |
|
265 | size = int(header) | |
262 | except ValueError: |
|
266 | except ValueError: | |
263 | raise RuntimeError("unexpected remotefilelog header: illegal format") |
|
267 | raise BadRemotefilelogHeader( | |
|
268 | "unexpected remotefilelog header: illegal format" | |||
|
269 | ) | |||
264 | if size is None: |
|
270 | if size is None: | |
265 | raise RuntimeError("unexpected remotefilelog header: no size found") |
|
271 | raise BadRemotefilelogHeader( | |
|
272 | "unexpected remotefilelog header: no size found" | |||
|
273 | ) | |||
266 | return index + 1, size, flags |
|
274 | return index + 1, size, flags | |
267 |
|
275 | |||
268 |
|
276 | |||
269 | def buildfileblobheader(size, flags, version=None): |
|
277 | def buildfileblobheader(size, flags, version=None): | |
270 | """return the header of a remotefilelog blob. |
|
278 | """return the header of a remotefilelog blob. | |
271 |
|
279 | |||
272 | see remotefilelogserver.createfileblob for the format. |
|
280 | see remotefilelogserver.createfileblob for the format. | |
273 | approximately the reverse of parsesizeflags. |
|
281 | approximately the reverse of parsesizeflags. | |
274 |
|
282 | |||
275 | version could be 0 or 1, or None (auto decide). |
|
283 | version could be 0 or 1, or None (auto decide). | |
276 | """ |
|
284 | """ | |
277 | # choose v0 if flags is empty, otherwise v1 |
|
285 | # choose v0 if flags is empty, otherwise v1 | |
278 | if version is None: |
|
286 | if version is None: | |
279 | version = int(bool(flags)) |
|
287 | version = int(bool(flags)) | |
280 | if version == 1: |
|
288 | if version == 1: | |
281 | header = b'v1\n%s%d\n%s%d' % ( |
|
289 | header = b'v1\n%s%d\n%s%d' % ( | |
282 | constants.METAKEYSIZE, |
|
290 | constants.METAKEYSIZE, | |
283 | size, |
|
291 | size, | |
284 | constants.METAKEYFLAG, |
|
292 | constants.METAKEYFLAG, | |
285 | flags, |
|
293 | flags, | |
286 | ) |
|
294 | ) | |
287 | elif version == 0: |
|
295 | elif version == 0: | |
288 | if flags: |
|
296 | if flags: | |
289 | raise error.ProgrammingError(b'fileblob v0 does not support flag') |
|
297 | raise error.ProgrammingError(b'fileblob v0 does not support flag') | |
290 | header = b'%d' % size |
|
298 | header = b'%d' % size | |
291 | else: |
|
299 | else: | |
292 | raise error.ProgrammingError(b'unknown fileblob version %d' % version) |
|
300 | raise error.ProgrammingError(b'unknown fileblob version %d' % version) | |
293 | return header |
|
301 | return header | |
294 |
|
302 | |||
295 |
|
303 | |||
296 | def ancestormap(raw): |
|
304 | def ancestormap(raw): | |
297 | offset, size, flags = parsesizeflags(raw) |
|
305 | offset, size, flags = parsesizeflags(raw) | |
298 | start = offset + size |
|
306 | start = offset + size | |
299 |
|
307 | |||
300 | mapping = {} |
|
308 | mapping = {} | |
301 | while start < len(raw): |
|
309 | while start < len(raw): | |
302 | divider = raw.index(b'\0', start + 80) |
|
310 | divider = raw.index(b'\0', start + 80) | |
303 |
|
311 | |||
304 | currentnode = raw[start : (start + 20)] |
|
312 | currentnode = raw[start : (start + 20)] | |
305 | p1 = raw[(start + 20) : (start + 40)] |
|
313 | p1 = raw[(start + 20) : (start + 40)] | |
306 | p2 = raw[(start + 40) : (start + 60)] |
|
314 | p2 = raw[(start + 40) : (start + 60)] | |
307 | linknode = raw[(start + 60) : (start + 80)] |
|
315 | linknode = raw[(start + 60) : (start + 80)] | |
308 | copyfrom = raw[(start + 80) : divider] |
|
316 | copyfrom = raw[(start + 80) : divider] | |
309 |
|
317 | |||
310 | mapping[currentnode] = (p1, p2, linknode, copyfrom) |
|
318 | mapping[currentnode] = (p1, p2, linknode, copyfrom) | |
311 | start = divider + 1 |
|
319 | start = divider + 1 | |
312 |
|
320 | |||
313 | return mapping |
|
321 | return mapping | |
314 |
|
322 | |||
315 |
|
323 | |||
316 | def readfile(path): |
|
324 | def readfile(path): | |
317 | f = open(path, b'rb') |
|
325 | f = open(path, b'rb') | |
318 | try: |
|
326 | try: | |
319 | result = f.read() |
|
327 | result = f.read() | |
320 |
|
328 | |||
321 | # we should never have empty files |
|
329 | # we should never have empty files | |
322 | if not result: |
|
330 | if not result: | |
323 | os.remove(path) |
|
331 | os.remove(path) | |
324 | raise IOError(b"empty file: %s" % path) |
|
332 | raise IOError(b"empty file: %s" % path) | |
325 |
|
333 | |||
326 | return result |
|
334 | return result | |
327 | finally: |
|
335 | finally: | |
328 | f.close() |
|
336 | f.close() | |
329 |
|
337 | |||
330 |
|
338 | |||
331 | def unlinkfile(filepath): |
|
339 | def unlinkfile(filepath): | |
332 | if pycompat.iswindows: |
|
340 | if pycompat.iswindows: | |
333 | # On Windows, os.unlink cannnot delete readonly files |
|
341 | # On Windows, os.unlink cannnot delete readonly files | |
334 | os.chmod(filepath, stat.S_IWUSR) |
|
342 | os.chmod(filepath, stat.S_IWUSR) | |
335 | os.unlink(filepath) |
|
343 | os.unlink(filepath) | |
336 |
|
344 | |||
337 |
|
345 | |||
338 | def renamefile(source, destination): |
|
346 | def renamefile(source, destination): | |
339 | if pycompat.iswindows: |
|
347 | if pycompat.iswindows: | |
340 | # On Windows, os.rename cannot rename readonly files |
|
348 | # On Windows, os.rename cannot rename readonly files | |
341 | # and cannot overwrite destination if it exists |
|
349 | # and cannot overwrite destination if it exists | |
342 | os.chmod(source, stat.S_IWUSR) |
|
350 | os.chmod(source, stat.S_IWUSR) | |
343 | if os.path.isfile(destination): |
|
351 | if os.path.isfile(destination): | |
344 | os.chmod(destination, stat.S_IWUSR) |
|
352 | os.chmod(destination, stat.S_IWUSR) | |
345 | os.unlink(destination) |
|
353 | os.unlink(destination) | |
346 |
|
354 | |||
347 | os.rename(source, destination) |
|
355 | os.rename(source, destination) | |
348 |
|
356 | |||
349 |
|
357 | |||
def writefile(path, content, readonly=False):
    dirname, filename = os.path.split(path)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise

    fd, temp = tempfile.mkstemp(prefix=b'.%s-' % filename, dir=dirname)
    os.close(fd)

    try:
        f = util.posixfile(temp, b'wb')
        f.write(content)
        f.close()

        if readonly:
            mode = 0o444
        else:
            # tempfiles are created with 0o600, so we need to manually set the
            # mode.
            oldumask = os.umask(0)
            # there's no way to get the umask without modifying it, so set it
            # back
            os.umask(oldumask)
            mode = ~oldumask

        renamefile(temp, path)
        os.chmod(path, mode)
    except Exception:
        try:
            unlinkfile(temp)
        except OSError:
            pass
        raise


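# Worked example for the mode computation above (reviewer note): with a
# typical umask of 0o022, `~0o022 == -19`; in practice only the low
# permission bits matter to chmod, so the written file effectively gets
# 0o755:
#
#     oct(~0o022 & 0o777)  # -> '0o755'

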
def sortnodes(nodes, parentfunc):
    """Topologically sorts the nodes, using the parentfunc to find
    the parents of nodes."""
    nodes = set(nodes)
    childmap = {}
    parentmap = {}
    roots = []

    # Build a child and parent map
    for n in nodes:
        parents = [p for p in parentfunc(n) if p in nodes]
        parentmap[n] = set(parents)
        for p in parents:
            childmap.setdefault(p, set()).add(n)
        if not parents:
            roots.append(n)

    roots.sort()
    # Process roots, adding children to the queue as they become roots
    results = []
    while roots:
        n = roots.pop(0)
        results.append(n)
        if n in childmap:
            children = childmap[n]
            for c in children:
                childparents = parentmap[c]
                childparents.remove(n)
                if len(childparents) == 0:
                    # insert at the beginning, that way child nodes
                    # are likely to be output immediately after their
                    # parents. This gives better compression results.
                    roots.insert(0, c)

    return results


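# Illustrative usage (reviewer sketch, not part of the module):
#
#     parents = {b'a': [], b'b': [b'a'], b'c': [b'b']}
#     sortnodes(parents.keys(), lambda n: parents[n])
#     # -> [b'a', b'b', b'c']
#
# Every node is emitted after its parents, and a newly freed child is
# processed next (roots.insert(0, c)), so chains stay contiguous in the
# output, which the comment above notes helps compression.

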
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(
            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
            % (len(s), n)
        )
    return s


def readunpack(stream, fmt):
    data = readexactly(stream, struct.calcsize(fmt))
    return struct.unpack(fmt, data)


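# Illustrative usage (reviewer sketch; io.BytesIO stands in for the wire
# stream, and b'!I' is an arbitrary format chosen for the example):
#
#     import io, struct
#     stream = io.BytesIO(struct.pack(b'!I', 42) + b'payload')
#     readunpack(stream, b'!I')  # -> (42,)
#     stream.read()              # -> b'payload'

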
def readpath(stream):
    rawlen = readexactly(stream, constants.FILENAMESIZE)
    pathlen = struct.unpack(constants.FILENAMESTRUCT, rawlen)[0]
    return readexactly(stream, pathlen)


def readnodelist(stream):
    rawlen = readexactly(stream, constants.NODECOUNTSIZE)
    nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(nodecount):
        yield readexactly(stream, constants.NODESIZE)


def readpathlist(stream):
    rawlen = readexactly(stream, constants.PATHCOUNTSIZE)
    pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(pathcount):
        yield readpath(stream)


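# Reviewer sketch of the inverse operation (not part of the module); this
# assumes the PATHCOUNTSTRUCT/FILENAMESTRUCT formats are symmetric with the
# unpacking done above:
#
#     import io, struct
#     paths = [b'foo/bar.txt', b'baz.c']
#     buf = struct.pack(constants.PATHCOUNTSTRUCT, len(paths))
#     for p in paths:
#         buf += struct.pack(constants.FILENAMESTRUCT, len(p)) + p
#     list(readpathlist(io.BytesIO(buf)))  # -> [b'foo/bar.txt', b'baz.c']

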
def getgid(groupname):
    try:
        gid = grp.getgrnam(pycompat.fsdecode(groupname)).gr_gid
        return gid
    except KeyError:
        return None


def setstickygroupdir(path, gid, warn=None):
    if gid is None:
        return
    try:
        os.chown(path, -1, gid)
        os.chmod(path, 0o2775)
    except (IOError, OSError) as ex:
        if warn:
            warn(_(b'unable to chown/chmod on %s: %s\n') % (path, ex))


def mkstickygroupdir(ui, path):
    """Creates the given directory (if it doesn't exist) and gives it a
    particular group with setgid enabled."""
    gid = None
    groupname = ui.config(b"remotefilelog", b"cachegroup")
    if groupname:
        gid = getgid(groupname)
        if gid is None:
            ui.warn(_(b'unable to resolve group name: %s\n') % groupname)

    # we use a single stat syscall to test the existence and mode / group bit
    st = None
    try:
        st = os.stat(path)
    except OSError:
        pass

    if st:
        # exists
        if (st.st_mode & 0o2775) != 0o2775 or st.st_gid != gid:
            # permission needs to be fixed
            setstickygroupdir(path, gid, ui.warn)
        return

    oldumask = os.umask(0o002)
    try:
        missingdirs = [path]
        path = os.path.dirname(path)
        while path and not os.path.exists(path):
            missingdirs.append(path)
            path = os.path.dirname(path)

        for path in reversed(missingdirs):
            try:
                os.mkdir(path)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        for path in missingdirs:
            setstickygroupdir(path, gid, ui.warn)
    finally:
        os.umask(oldumask)


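# Reviewer note on the 0o2775 mask used above: it combines the setgid bit
# (0o2000) with rwxrwxr-x (0o775), so files created inside the cache
# directory inherit its group. The stat-based check treats a directory as
# healthy only when all of those bits are present, e.g.:
#
#     (0o42775 & 0o2775) == 0o2775  # drwxrwsr-x -> True, nothing to fix

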
def getusername(ui):
    try:
        return stringutil.shortuser(ui.username())
    except Exception:
        return b'unknown'


def getreponame(ui):
    reponame = ui.config(b'paths', b'default')
    if reponame:
        return os.path.basename(reponame)
    return b"unknown"


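# Illustrative behaviour (reviewer sketch): with `[paths] default =
# https://example.com/repos/main` configured, getreponame(ui) returns
# b'main' (the basename of the default path); with no default path it
# falls back to b"unknown".

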
@@ -1,72 +1,73 @@
#require no-windows

  $ . "$TESTDIR/remotefilelog-library.sh"

  $ hg init master
  $ cd master
  $ cat >> .hg/hgrc <<EOF
  > [remotefilelog]
  > server=True
  > EOF
  $ echo x > x
  $ echo y > y
  $ echo z > z
  $ hg commit -qAm xy

  $ cd ..

  $ hgcloneshallow ssh://user@dummy/master shallow -q
  3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
  $ cd shallow

Verify corrupt cache handling repairs by default

  $ hg up -q null
  $ chmod u+w $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
  $ echo x > $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
  $ hg up tip
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)

Verify corrupt cache error message

  $ hg up -q null
  $ cat >> .hg/hgrc <<EOF
  > [remotefilelog]
  > validatecache=off
  > EOF
  $ chmod u+w $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
  $ echo x > $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
  $ hg up tip 2>&1 | egrep "^[^ ].*unexpected remotefilelog"
  abort: unexpected remotefilelog header: illegal format (no-py3 !)
  hgext.remotefilelog.shallowutil.BadRemotefilelogHeader: unexpected remotefilelog header: illegal format (py3 !)

Verify detection and remediation when remotefilelog.validatecachelog is set

  $ cat >> .hg/hgrc <<EOF
  > [remotefilelog]
  > validatecachelog=$PWD/.hg/remotefilelog_cache.log
  > validatecache=strict
  > EOF
  $ chmod u+w $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
  $ echo x > $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
  $ hg up tip
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
  $ cat .hg/remotefilelog_cache.log
  corrupt $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 during contains

Verify handling of corrupt server cache

  $ rm -f ../master/.hg/remotefilelogcache/y/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
  $ touch ../master/.hg/remotefilelogcache/y/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
  $ clearcache
  $ hg prefetch -r .
  3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
  $ test -s ../master/.hg/remotefilelogcache/y/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
  $ hg debugremotefilelog $CACHEDIR/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
  size: 2 bytes
  path: $TESTTMP/hgcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
  key: 076f5e2225b3

  node => p1 p2 linknode copyfrom
  076f5e2225b3 => 000000000000 000000000000 f3d0bb0d1e48