@@ -1,426 +1,426 @@
from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat
import time

from mercurial.i18n import _
from mercurial.node import bin, hex
from mercurial import (
    error,
    pycompat,
    util,
)
from . import (
    constants,
    shallowutil,
)

class basestore(object):
    def __init__(self, repo, path, reponame, shared=False):
        """Creates a remotefilelog store object for the given repo name.

        `path` - The file path where this store keeps its data
        `reponame` - The name of the repo. This is used to partition data from
        many repos.
        `shared` - True if this store is a shared cache of data from the central
        server, for many repos on this machine. False means this store is for
        the local data for one repo.
        """
        self.repo = repo
        self.ui = repo.ui
        self._path = path
        self._reponame = reponame
        self._shared = shared
        self._uid = os.getuid() if not pycompat.iswindows else None

        self._validatecachelog = self.ui.config("remotefilelog",
                                                "validatecachelog")
        self._validatecache = self.ui.config("remotefilelog", "validatecache",
                                             'on')
        if self._validatecache not in ('on', 'strict', 'off'):
            self._validatecache = 'on'
        if self._validatecache == 'off':
            self._validatecache = False

        if shared:
            shallowutil.mkstickygroupdir(self.ui, path)

    def getmissing(self, keys):
        missing = []
        for name, node in keys:
            filepath = self._getfilepath(name, node)
            exists = os.path.exists(filepath)
            if (exists and self._validatecache == 'strict' and
                not self._validatekey(filepath, 'contains')):
                exists = False
            if not exists:
                missing.append((name, node))

        return missing

    # BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE

    def markledger(self, ledger, options=None):
        if options and options.get(constants.OPTION_PACKSONLY):
            return
        if self._shared:
            for filename, nodes in self._getfiles():
                for node in nodes:
                    ledger.markdataentry(self, filename, node)
                    ledger.markhistoryentry(self, filename, node)

    def cleanup(self, ledger):
        ui = self.ui
        entries = ledger.sources.get(self, [])
        count = 0
        progress = ui.makeprogress(_("cleaning up"), unit="files",
                                   total=len(entries))
        for entry in entries:
            if entry.gced or (entry.datarepacked and entry.historyrepacked):
                progress.update(count)
                path = self._getfilepath(entry.filename, entry.node)
                util.tryunlink(path)
            count += 1
        progress.complete()

        # Clean up the repo cache directory.
        self._cleanupdirectory(self._getrepocachepath())

    # BELOW THIS ARE NON-STANDARD APIS

    def _cleanupdirectory(self, rootdir):
        """Removes the empty directories and unnecessary files within the root
        directory recursively. Note that this method does not remove the root
        directory itself. """

        oldfiles = set()
        otherfiles = set()
        # osutil.listdir returns stat information which saves some rmdir/listdir
        # syscalls.
        for name, mode in util.osutil.listdir(rootdir):
            if stat.S_ISDIR(mode):
                dirpath = os.path.join(rootdir, name)
                self._cleanupdirectory(dirpath)

                # Now that the directory specified by dirpath is potentially
                # empty, try and remove it.
                try:
                    os.rmdir(dirpath)
                except OSError:
                    pass

            elif stat.S_ISREG(mode):
                if name.endswith('_old'):
                    oldfiles.add(name[:-4])
                else:
                    otherfiles.add(name)

        # Remove the files which end with suffix '_old' and have no
        # corresponding file without the suffix '_old'. See addremotefilelognode
        # method for the generation/purpose of files with '_old' suffix.
        for filename in oldfiles - otherfiles:
            filepath = os.path.join(rootdir, filename + '_old')
            util.tryunlink(filepath)

    def _getfiles(self):
        """Return a list of (filename, [node,...]) for all the revisions that
        exist in the store.

        This is useful for obtaining a list of all the contents of the store
        when performing a repack to another store, since the store API requires
        name+node keys and not namehash+node keys.
        """
        existing = {}
        for filenamehash, node in self._listkeys():
            existing.setdefault(filenamehash, []).append(node)

        filenamemap = self._resolvefilenames(existing.keys())

        for filename, sha in filenamemap.iteritems():
            yield (filename, existing[sha])

    def _resolvefilenames(self, hashes):
        """Given a list of filename hashes that are present in the
        remotefilelog store, return a mapping from filename->hash.

        This is useful when converting remotefilelog blobs into other storage
        formats.
        """
        if not hashes:
            return {}

        filenames = {}
        missingfilename = set(hashes)

        # Start with a full manifest, since it'll cover the majority of files
        for filename in self.repo['tip'].manifest():
            sha = hashlib.sha1(filename).digest()
            if sha in missingfilename:
                filenames[filename] = sha
                missingfilename.discard(sha)

        # Scan the changelog until we've found every file name
        cl = self.repo.unfiltered().changelog
        for rev in pycompat.xrange(len(cl) - 1, -1, -1):
            if not missingfilename:
                break
            files = cl.readfiles(cl.node(rev))
            for filename in files:
                sha = hashlib.sha1(filename).digest()
                if sha in missingfilename:
                    filenames[filename] = sha
                    missingfilename.discard(sha)

        return filenames

    def _getrepocachepath(self):
        return os.path.join(
            self._path, self._reponame) if self._shared else self._path

    def _listkeys(self):
        """List all the remotefilelog keys that exist in the store.

        Returns an iterator of (filename hash, filecontent hash) tuples.
188 | """ |
|
188 | """ | |
189 |
|
189 | |||
190 | for root, dirs, files in os.walk(self._getrepocachepath()): |
|
190 | for root, dirs, files in os.walk(self._getrepocachepath()): | |
191 | for filename in files: |
|
191 | for filename in files: | |
192 | if len(filename) != 40: |
|
192 | if len(filename) != 40: | |
193 | continue |
|
193 | continue | |
194 | node = filename |
|
194 | node = filename | |
195 | if self._shared: |
|
195 | if self._shared: | |
196 | # .../1a/85ffda..be21 |
|
196 | # .../1a/85ffda..be21 | |
197 | filenamehash = root[-41:-39] + root[-38:] |
|
197 | filenamehash = root[-41:-39] + root[-38:] | |
198 | else: |
|
198 | else: | |
199 | filenamehash = root[-40:] |
|
199 | filenamehash = root[-40:] | |
200 | yield (bin(filenamehash), bin(node)) |
|
200 | yield (bin(filenamehash), bin(node)) | |
201 |
|
201 | |||
202 | def _getfilepath(self, name, node): |
|
202 | def _getfilepath(self, name, node): | |
203 | node = hex(node) |
|
203 | node = hex(node) | |
204 | if self._shared: |
|
204 | if self._shared: | |
205 | key = shallowutil.getcachekey(self._reponame, name, node) |
|
205 | key = shallowutil.getcachekey(self._reponame, name, node) | |
206 | else: |
|
206 | else: | |
207 | key = shallowutil.getlocalkey(name, node) |
|
207 | key = shallowutil.getlocalkey(name, node) | |
208 |
|
208 | |||
209 | return os.path.join(self._path, key) |
|
209 | return os.path.join(self._path, key) | |
210 |
|
210 | |||
211 | def _getdata(self, name, node): |
|
211 | def _getdata(self, name, node): | |
212 | filepath = self._getfilepath(name, node) |
|
212 | filepath = self._getfilepath(name, node) | |
213 | try: |
|
213 | try: | |
214 | data = shallowutil.readfile(filepath) |
|
214 | data = shallowutil.readfile(filepath) | |
215 | if self._validatecache and not self._validatedata(data, filepath): |
|
215 | if self._validatecache and not self._validatedata(data, filepath): | |
216 | if self._validatecachelog: |
|
216 | if self._validatecachelog: | |
217 | with open(self._validatecachelog, 'a+') as f: |
|
217 | with open(self._validatecachelog, 'a+') as f: | |
218 | f.write("corrupt %s during read\n" % filepath) |
|
218 | f.write("corrupt %s during read\n" % filepath) | |
219 | os.rename(filepath, filepath + ".corrupt") |
|
219 | os.rename(filepath, filepath + ".corrupt") | |
220 | raise KeyError("corrupt local cache file %s" % filepath) |
|
220 | raise KeyError("corrupt local cache file %s" % filepath) | |
221 | except IOError: |
|
221 | except IOError: | |
222 | raise KeyError("no file found at %s for %s:%s" % (filepath, name, |
|
222 | raise KeyError("no file found at %s for %s:%s" % (filepath, name, | |
223 | hex(node))) |
|
223 | hex(node))) | |
224 |
|
224 | |||
225 | return data |
|
225 | return data | |
226 |
|
226 | |||
227 | def addremotefilelognode(self, name, node, data): |
|
227 | def addremotefilelognode(self, name, node, data): | |
228 | filepath = self._getfilepath(name, node) |
|
228 | filepath = self._getfilepath(name, node) | |
229 |
|
229 | |||
230 | oldumask = os.umask(0o002) |
|
230 | oldumask = os.umask(0o002) | |
231 | try: |
|
231 | try: | |
232 | # if this node already exists, save the old version for |
|
232 | # if this node already exists, save the old version for | |
233 | # recovery/debugging purposes. |
|
233 | # recovery/debugging purposes. | |
234 | if os.path.exists(filepath): |
|
234 | if os.path.exists(filepath): | |
235 | newfilename = filepath + '_old' |
|
235 | newfilename = filepath + '_old' | |
236 | # newfilename can be read-only and shutil.copy will fail. |
|
236 | # newfilename can be read-only and shutil.copy will fail. | |
237 | # Delete newfilename to avoid it |
|
237 | # Delete newfilename to avoid it | |
238 | if os.path.exists(newfilename): |
|
238 | if os.path.exists(newfilename): | |
239 | shallowutil.unlinkfile(newfilename) |
|
239 | shallowutil.unlinkfile(newfilename) | |
240 | shutil.copy(filepath, newfilename) |
|
240 | shutil.copy(filepath, newfilename) | |
241 |
|
241 | |||
242 | shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath)) |
|
242 | shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath)) | |
243 | shallowutil.writefile(filepath, data, readonly=True) |
|
243 | shallowutil.writefile(filepath, data, readonly=True) | |
244 |
|
244 | |||
245 | if self._validatecache: |
|
245 | if self._validatecache: | |
246 | if not self._validatekey(filepath, 'write'): |
|
246 | if not self._validatekey(filepath, 'write'): | |
247 | raise error.Abort(_("local cache write was corrupted %s") % |
|
247 | raise error.Abort(_("local cache write was corrupted %s") % | |
248 | filepath) |
|
248 | filepath) | |
249 | finally: |
|
249 | finally: | |
250 | os.umask(oldumask) |
|
250 | os.umask(oldumask) | |
251 |
|
251 | |||
252 | def markrepo(self, path): |
|
252 | def markrepo(self, path): | |
253 | """Call this to add the given repo path to the store's list of |
|
253 | """Call this to add the given repo path to the store's list of | |
254 | repositories that are using it. This is useful later when doing garbage |
|
254 | repositories that are using it. This is useful later when doing garbage | |
        collection, since it allows us to inspect the repos to see what nodes
        they want to be kept alive in the store.
        """
        repospath = os.path.join(self._path, "repos")
        with open(repospath, 'ab') as reposfile:
            reposfile.write(os.path.dirname(path) + "\n")

        repospathstat = os.stat(repospath)
        if repospathstat.st_uid == self._uid:
            os.chmod(repospath, 0o0664)

    def _validatekey(self, path, action):
        with open(path, 'rb') as f:
            data = f.read()

        if self._validatedata(data, path):
            return True

        if self._validatecachelog:
            with open(self._validatecachelog, 'ab+') as f:
                f.write("corrupt %s during %s\n" % (path, action))

        os.rename(path, path + ".corrupt")
        return False

    def _validatedata(self, data, path):
        try:
            if len(data) > 0:
                # see remotefilelogserver.createfileblob for the format
                offset, size, flags = shallowutil.parsesizeflags(data)
                if len(data) <= size:
                    # it is truncated
                    return False

                # extract the node from the metadata
                offset += size
                datanode = data[offset:offset + 20]

                # and compare against the path
                if os.path.basename(path) == hex(datanode):
                    # Content matches the intended path
                    return True
                return False
        except (ValueError, RuntimeError):
            pass

        return False

    def gc(self, keepkeys):
        ui = self.ui
        cachepath = self._path

        # prune cache
        import Queue
        queue = Queue.PriorityQueue()
        originalsize = 0
        size = 0
        count = 0
        removed = 0

        # keep files newer than a day even if they aren't needed
        limit = time.time() - (60 * 60 * 24)

        progress = ui.makeprogress(_("removing unnecessary files"),
                                   unit="files")
        progress.update(0)
        for root, dirs, files in os.walk(cachepath):
            for file in files:
                if file == 'repos':
                    continue

                # Don't delete pack files
                if '/packs/' in root:
                    continue

                progress.update(count)
                path = os.path.join(root, file)
                key = os.path.relpath(path, cachepath)
                count += 1
                try:
                    pathstat = os.stat(path)
                except OSError as e:
                    # errno.ENOENT = no such file or directory
                    if e.errno != errno.ENOENT:
                        raise
                    msg = _("warning: file %s was removed by another process\n")
                    ui.warn(msg % path)
                    continue

                originalsize += pathstat.st_size

                if key in keepkeys or pathstat.st_atime > limit:
                    queue.put((pathstat.st_atime, path, pathstat))
                    size += pathstat.st_size
                else:
                    try:
                        shallowutil.unlinkfile(path)
                    except OSError as e:
                        # errno.ENOENT = no such file or directory
                        if e.errno != errno.ENOENT:
                            raise
                        msg = _("warning: file %s was removed by another "
                                "process\n")
                        ui.warn(msg % path)
                        continue
                    removed += 1
        progress.complete()

        # remove oldest files until under limit
        limit = ui.configbytes("remotefilelog", "cachelimit")
        if size > limit:
            excess = size - limit
            progress = ui.makeprogress(_("enforcing cache limit"), unit="bytes",
                                       total=excess)
            removedexcess = 0
            while queue and size > limit and size > 0:
                progress.update(removedexcess)
                atime, oldpath, oldpathstat = queue.get()
                try:
                    shallowutil.unlinkfile(oldpath)
                except OSError as e:
                    # errno.ENOENT = no such file or directory
                    if e.errno != errno.ENOENT:
                        raise
                    msg = _("warning: file %s was removed by another process\n")
                    ui.warn(msg % oldpath)
                size -= oldpathstat.st_size
                removed += 1
                removedexcess += oldpathstat.st_size
            progress.complete()

        ui.status(_("finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
                  % (removed, count,
                     float(originalsize) / 1024.0 / 1024.0 / 1024.0,
                     float(size) / 1024.0 / 1024.0 / 1024.0))

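A note on the on-disk layout that `_getfilepath` writes and `_listkeys` walks back: the `root[-41:-39] + root[-38:]` slicing implies each shared-cache entry lives at `<reponame>/<2 hex chars>/<38 hex chars>/<40-hex node>`, with the two directory components being the split SHA-1 of the file name. A minimal sketch of that key construction; `cachekey` below is a hypothetical stand-in for `shallowutil.getcachekey`, whose body is not part of this change:

import hashlib
import os

def cachekey(reponame, name, node):
    # Hypothetical stand-in for shallowutil.getcachekey (assumption: it
    # splits the hex SHA-1 of the file name 2/38, which is exactly what
    # the root[-41:-39] + root[-38:] slicing in _listkeys undoes).
    pathhash = hashlib.sha1(name).hexdigest()
    return os.path.join(reponame, pathhash[:2], pathhash[2:], node)

key = cachekey('myrepo', b'path/to/file', '11' * 20)
# e.g. myrepo/93/0f...ab/1111...1111, joined under the store's cache path
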
class baseunionstore(object):
    def __init__(self, *args, **kwargs):
        # If one of the functions that iterates all of the stores is about to
        # throw a KeyError, try this many times with a full refresh between
        # attempts. A repack operation may have moved data from one store to
        # another while we were running.
        self.numattempts = kwargs.get(r'numretries', 0) + 1
        # If not None, call this function on every retry and when the attempts
        # are exhausted.
        self.retrylog = kwargs.get(r'retrylog', None)

    def markforrefresh(self):
        for store in self.stores:
            if util.safehasattr(store, 'markforrefresh'):
                store.markforrefresh()

    @staticmethod
    def retriable(fn):
        def noop(*args):
            pass
        def wrapped(self, *args, **kwargs):
            retrylog = self.retrylog or noop
            funcname = fn.__name__
            for i in pycompat.xrange(self.numattempts):
                if i > 0:
                    retrylog('re-attempting (n=%d) %s\n' % (i, funcname))
                    self.markforrefresh()
                try:
                    return fn(self, *args, **kwargs)
                except KeyError:
                    pass
            # retries exhausted
            retrylog('retries exhausted in %s, raising KeyError\n' %
                     pycompat.sysbytes(funcname))
            raise
        return wrapped
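The `retriable` decorator is what makes union stores robust against concurrent repacks: each retry calls `markforrefresh()` and the wrapped call gets another chance before the `KeyError` escapes. A toy subclass to show the control flow, assuming the module above is importable as `hgext.remotefilelog.basestore` (the class and method names here are made up for illustration):

from hgext.remotefilelog.basestore import baseunionstore

class fakeunionstore(baseunionstore):
    def __init__(self, **kwargs):
        super(fakeunionstore, self).__init__(**kwargs)
        self.stores = []   # nothing to refresh in this toy example
        self.calls = 0

    @baseunionstore.retriable
    def get(self, name, node):
        # Simulate data that a concurrent repack moved: the first two
        # lookups miss, the third succeeds.
        self.calls += 1
        if self.calls < 3:
            raise KeyError(name)
        return 'data'

store = fakeunionstore(numretries=2)   # numattempts becomes 3
assert store.get('a', '0' * 40) == 'data'   # third attempt wins
assert store.calls == 3
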
@@ -1,378 +1,378 @@
# debugcommands.py - debug logic for remotefilelog
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import hashlib
import os
import zlib

from mercurial.node import bin, hex, nullid, short
from mercurial.i18n import _
from mercurial import (
    error,
    filelog,
    node as nodemod,
    revlog,
)
from . import (
    constants,
    datapack,
    extutil,
    fileserverclient,
    historypack,
    repack,
    shallowutil,
)

def debugremotefilelog(ui, path, **opts):
    decompress = opts.get(r'decompress')

    size, firstnode, mapping = parsefileblob(path, decompress)

    ui.status(_("size: %d bytes\n") % (size))
    ui.status(_("path: %s \n") % (path))
    ui.status(_("key: %s \n") % (short(firstnode)))
    ui.status(_("\n"))
    ui.status(_("%12s => %12s %13s %13s %12s\n") %
              ("node", "p1", "p2", "linknode", "copyfrom"))

    queue = [firstnode]
    while queue:
        node = queue.pop(0)
        p1, p2, linknode, copyfrom = mapping[node]
        ui.status(_("%s => %s  %s  %s  %s\n") %
            (short(node), short(p1), short(p2), short(linknode), copyfrom))
        if p1 != nullid:
            queue.append(p1)
        if p2 != nullid:
            queue.append(p2)

def buildtemprevlog(repo, file):
    # get filename key
    filekey = nodemod.hex(hashlib.sha1(file).digest())
    filedir = os.path.join(repo.path, 'store/data', filekey)

    # sort all entries based on linkrev
    fctxs = []
    for filenode in os.listdir(filedir):
        if '_old' not in filenode:
            fctxs.append(repo.filectx(file, fileid=bin(filenode)))

    fctxs = sorted(fctxs, key=lambda x: x.linkrev())

    # add to revlog
    temppath = repo.sjoin('data/temprevlog.i')
    if os.path.exists(temppath):
        os.remove(temppath)
    r = filelog.filelog(repo.svfs, 'temprevlog')

    class faket(object):
        def add(self, a, b, c):
            pass
    t = faket()
    for fctx in fctxs:
        if fctx.node() not in repo:
            continue

        p = fctx.filelog().parents(fctx.filenode())
        meta = {}
        if fctx.renamed():
            meta['copy'] = fctx.renamed()[0]
            meta['copyrev'] = hex(fctx.renamed()[1])

        r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])

    return r

def debugindex(orig, ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    if (opts.get(r'changelog') or
        opts.get(r'manifest') or
        opts.get(r'dir') or
        not shallowutil.isenabled(repo) or
        not repo.shallowmatch(file_)):
        return orig(ui, repo, file_, **opts)

    r = buildtemprevlog(repo, file_)

    # debugindex like normal
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = '  base'

    if format == 0:
        ui.write(("   rev    offset  length " + basehdr + " linkrev"
                  " nodeid       p1           p2\n"))
    elif format == 1:
        ui.write(("   rev flag   offset   length"
                  "     size " + basehdr + "   link     p1     p2"
                  "       nodeid\n"))

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    short(node), short(pp[0]), short(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], short(node)))

def debugindexdot(orig, ui, repo, file_):
    """dump an index DAG as a graphviz dot file"""
    if not shallowutil.isenabled(repo):
        return orig(ui, repo, file_)

    r = buildtemprevlog(repo, os.path.basename(file_)[:-2])

    ui.write(("digraph G {\n"))
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")

def verifyremotefilelog(ui, path, **opts):
    decompress = opts.get(r'decompress')

    for root, dirs, files in os.walk(path):
        for file in files:
            if file == "repos":
                continue
            filepath = os.path.join(root, file)
            size, firstnode, mapping = parsefileblob(filepath, decompress)
            for p1, p2, linknode, copyfrom in mapping.itervalues():
                if linknode == nullid:
                    actualpath = os.path.relpath(root, path)
                    key = fileserverclient.getcachekey("reponame", actualpath,
                                                       file)
                    ui.status("%s %s\n" % (key, os.path.relpath(filepath,
                                                                path)))

def _decompressblob(raw):
    return zlib.decompress(raw)

def parsefileblob(path, decompress):
    raw = None
    f = open(path, "rb")
    try:
        raw = f.read()
    finally:
        f.close()

    if decompress:
        raw = _decompressblob(raw)

    offset, size, flags = shallowutil.parsesizeflags(raw)
    start = offset + size

    firstnode = None

    mapping = {}
    while start < len(raw):
        divider = raw.index('\0', start + 80)

        currentnode = raw[start:(start + 20)]
        if not firstnode:
            firstnode = currentnode

        p1 = raw[(start + 20):(start + 40)]
        p2 = raw[(start + 40):(start + 60)]
        linknode = raw[(start + 60):(start + 80)]
        copyfrom = raw[(start + 80):divider]

        mapping[currentnode] = (p1, p2, linknode, copyfrom)
        start = divider + 1

    return size, firstnode, mapping

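The loop in `parsefileblob` implies the blob layout: after the header parsed by `shallowutil.parsesizeflags` and `size` bytes of file text, each ancestor record is four 20-byte binary nodes (node, p1, p2, linknode) followed by a NUL-terminated copyfrom path. A minimal sketch that builds one such record and slices it the same way the loop does (the header/text prefix is skipped here by starting at offset 0):

# Four fixed-width 20-byte node fields, then a NUL-terminated copyfrom.
node = b'\x11' * 20
p1 = b'\x22' * 20
p2 = b'\x00' * 20          # nullid: no second parent
linknode = b'\x33' * 20
copyfrom = b''             # empty unless this revision is a copy/rename

record = node + p1 + p2 + linknode + copyfrom + b'\x00'

# The same slicing parsefileblob uses, with start at the record boundary:
start = 0
divider = record.index(b'\x00', start + 80)
assert record[start:start + 20] == node
assert record[(start + 60):(start + 80)] == linknode
assert record[(start + 80):divider] == copyfrom
assert divider + 1 == len(record)   # the next record would begin here
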
def debugdatapack(ui, *paths, **opts):
    for path in paths:
        if '.data' in path:
            path = path[:path.index('.data')]
        ui.write("%s:\n" % path)
        dpack = datapack.datapack(path)
        node = opts.get(r'node')
        if node:
            deltachain = dpack.getdeltachain('', bin(node))
            dumpdeltachain(ui, deltachain, **opts)
            return

        if opts.get(r'long'):
            hashformatter = hex
            hashlen = 42
        else:
            hashformatter = short
            hashlen = 14

        lastfilename = None
        totaldeltasize = 0
        totalblobsize = 0
        def printtotals():
            if lastfilename is not None:
                ui.write("\n")
            if not totaldeltasize or not totalblobsize:
                return
            difference = totalblobsize - totaldeltasize
            deltastr = "%0.1f%% %s" % (
                (100.0 * abs(difference) / totalblobsize),
                ("smaller" if difference > 0 else "bigger"))

            ui.write(("Total:%s%s  %s (%s)\n") % (
                "".ljust(2 * hashlen - len("Total:")),
                ('%d' % totaldeltasize).ljust(12),
                ('%d' % totalblobsize).ljust(9),
                deltastr
            ))

        bases = {}
        nodes = set()
        failures = 0
        for filename, node, deltabase, deltalen in dpack.iterentries():
            bases[node] = deltabase
            if node in nodes:
                ui.write(("Bad entry: %s appears twice\n" % short(node)))
                failures += 1
            nodes.add(node)
            if filename != lastfilename:
                printtotals()
                name = '(empty name)' if filename == '' else filename
                ui.write("%s:\n" % name)
                ui.write("%s%s%s%s\n" % (
                    "Node".ljust(hashlen),
                    "Delta Base".ljust(hashlen),
                    "Delta Length".ljust(14),
                    "Blob Size".ljust(9)))
                lastfilename = filename
                totalblobsize = 0
                totaldeltasize = 0

            # Metadata could be missing, in which case it will be an empty dict.
            meta = dpack.getmeta(filename, node)
            if constants.METAKEYSIZE in meta:
                blobsize = meta[constants.METAKEYSIZE]
                totaldeltasize += deltalen
                totalblobsize += blobsize
            else:
                blobsize = "(missing)"
            ui.write("%s  %s  %s%d\n" % (
                hashformatter(node),
                hashformatter(deltabase),
                ('%d' % deltalen).ljust(14),
                blobsize))

        if filename is not None:
            printtotals()

        failures += _sanitycheck(ui, set(nodes), bases)
        if failures > 1:
            ui.warn(("%d failures\n" % failures))
            return 1

def _sanitycheck(ui, nodes, bases):
    """
    Does some basic sanity checking on a packfile with ``nodes`` and ``bases`` (a
    mapping of node->base):

    - Each deltabase must itself be a node elsewhere in the pack
    - There must be no cycles
    """
    failures = 0
    for node in nodes:
        seen = set()
        current = node
        deltabase = bases[current]

        while deltabase != nullid:
            if deltabase not in nodes:
                ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" %
                        (short(node), short(deltabase))))
                failures += 1
                break

            if deltabase in seen:
                ui.warn(("Bad entry: %s has a cycle (at %s)\n" %
                        (short(node), short(deltabase))))
                failures += 1
                break

            current = deltabase
            seen.add(current)
            deltabase = bases[current]
        # Since ``node`` begins a valid chain, reset/memoize its base to nullid
        # so we don't traverse it again.
        bases[node] = nullid
    return failures

def dumpdeltachain(ui, deltachain, **opts):
    hashformatter = hex
    hashlen = 40

    lastfilename = None
    for filename, node, filename, deltabasenode, delta in deltachain:
        if filename != lastfilename:
            ui.write("\n%s\n" % filename)
            lastfilename = filename
            ui.write("%s  %s  %s  %s\n" % (
                "Node".ljust(hashlen),
                "Delta Base".ljust(hashlen),
                "Delta SHA1".ljust(hashlen),
                "Delta Length".ljust(6),
            ))

        ui.write("%s  %s  %s  %d\n" % (
            hashformatter(node),
            hashformatter(deltabasenode),
            nodemod.hex(hashlib.sha1(delta).digest()),
            len(delta)))

def debughistorypack(ui, path):
    if '.hist' in path:
        path = path[:path.index('.hist')]
    hpack = historypack.historypack(path)

    lastfilename = None
    for entry in hpack.iterentries():
        filename, node, p1node, p2node, linknode, copyfrom = entry
        if filename != lastfilename:
            ui.write("\n%s\n" % filename)
            ui.write("%s%s%s%s%s\n" % (
                "Node".ljust(14),
                "P1 Node".ljust(14),
                "P2 Node".ljust(14),
                "Link Node".ljust(14),
                "Copy From"))
            lastfilename = filename
        ui.write("%s  %s  %s  %s  %s\n" % (short(node), short(p1node),
                 short(p2node), short(linknode), copyfrom))

def debugwaitonrepack(repo):
    with extutil.flock(repack.repacklockvfs(repo).join('repacklock'), ''):
        return

def debugwaitonprefetch(repo):
    with repo._lock(repo.svfs, "prefetchlock", True, None,
                    None, _('prefetching in %s') % repo.origroot):
        pass
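A quick illustration of what `_sanitycheck` catches, using 20-byte stand-in hashes and a stub `ui`; the mutual delta chain between `a` and `b` below never reaches nullid, so the cycle check fires. This is just a sketch, assuming the module is importable as `hgext.remotefilelog.debugcommands`:

from mercurial.node import nullid
from hgext.remotefilelog.debugcommands import _sanitycheck

class stubui(object):
    # Minimal stand-in for a Mercurial ui object; only warn() is needed.
    def warn(self, msg):
        print(msg)

a, b, c = b'a' * 20, b'b' * 20, b'c' * 20

# Well-formed: c deltas against b, b against a, a against the null base.
good = {a: nullid, b: a, c: b}
assert _sanitycheck(stubui(), set(good), dict(good)) == 0

# Broken: a and b delta against each other, so walking either chain
# revisits a node it has already seen and is reported as a cycle.
bad = {a: b, b: a}
assert _sanitycheck(stubui(), set(bad), dict(bad)) == 1
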