@@ -1,426 +1,426 b''
|
1 | 1 | from __future__ import absolute_import |
|
2 | 2 | |
|
3 | 3 | import errno |
|
4 | 4 | import hashlib |
|
5 | 5 | import os |
|
6 | 6 | import shutil |
|
7 | 7 | import stat |
|
8 | 8 | import time |
|
9 | 9 | |
|
10 | 10 | from mercurial.i18n import _ |
|
11 | 11 | from mercurial.node import bin, hex |
|
12 | 12 | from mercurial import ( |
|
13 | 13 | error, |
|
14 | 14 | pycompat, |
|
15 | 15 | util, |
|
16 | 16 | ) |
|
17 | 17 | from . import ( |
|
18 | 18 | constants, |
|
19 | 19 | shallowutil, |
|
20 | 20 | ) |
|
21 | 21 | |
|
22 | 22 | class basestore(object): |
|
23 | 23 | def __init__(self, repo, path, reponame, shared=False): |
|
24 | 24 | """Creates a remotefilelog store object for the given repo name. |
|
25 | 25 | |
|
26 | 26 | `path` - The file path where this store keeps its data |
|
27 | 27 | `reponame` - The name of the repo. This is used to partition data from |
|
28 | 28 | many repos. |
|
29 | 29 | `shared` - True if this store is a shared cache of data from the central |
|
30 | 30 | server, for many repos on this machine. False means this store is for |
|
31 | 31 | the local data for one repo. |
|
32 | 32 | """ |
|
33 | 33 | self.repo = repo |
|
34 | 34 | self.ui = repo.ui |
|
35 | 35 | self._path = path |
|
36 | 36 | self._reponame = reponame |
|
37 | 37 | self._shared = shared |
|
38 | 38 | self._uid = os.getuid() if not pycompat.iswindows else None |
|
39 | 39 | |
|
40 | 40 | self._validatecachelog = self.ui.config("remotefilelog", |
|
41 | 41 | "validatecachelog") |
|
42 | 42 | self._validatecache = self.ui.config("remotefilelog", "validatecache", |
|
43 | 43 | 'on') |
|
44 | 44 | if self._validatecache not in ('on', 'strict', 'off'): |
|
45 | 45 | self._validatecache = 'on' |
|
46 | 46 | if self._validatecache == 'off': |
|
47 | 47 | self._validatecache = False |
|
48 | 48 | |
|
49 | 49 | if shared: |
|
50 | 50 | shallowutil.mkstickygroupdir(self.ui, path) |
|
51 | 51 | |
|
52 | 52 | def getmissing(self, keys): |
|
53 | 53 | missing = [] |
|
54 | 54 | for name, node in keys: |
|
55 | 55 | filepath = self._getfilepath(name, node) |
|
56 | 56 | exists = os.path.exists(filepath) |
|
57 | 57 | if (exists and self._validatecache == 'strict' and |
|
58 | 58 | not self._validatekey(filepath, 'contains')): |
|
59 | 59 | exists = False |
|
60 | 60 | if not exists: |
|
61 | 61 | missing.append((name, node)) |
|
62 | 62 | |
|
63 | 63 | return missing |
|
64 | 64 | |
|
65 | 65 | # BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE |
|
66 | 66 | |
|
67 | 67 | def markledger(self, ledger, options=None): |
|
68 | 68 | if options and options.get(constants.OPTION_PACKSONLY): |
|
69 | 69 | return |
|
70 | 70 | if self._shared: |
|
71 | 71 | for filename, nodes in self._getfiles(): |
|
72 | 72 | for node in nodes: |
|
73 | 73 | ledger.markdataentry(self, filename, node) |
|
74 | 74 | ledger.markhistoryentry(self, filename, node) |
|
75 | 75 | |
|
76 | 76 | def cleanup(self, ledger): |
|
77 | 77 | ui = self.ui |
|
78 | 78 | entries = ledger.sources.get(self, []) |
|
79 | 79 | count = 0 |
|
80 | 80 | progress = ui.makeprogress(_("cleaning up"), unit="files", |
|
81 | 81 | total=len(entries)) |
|
82 | 82 | for entry in entries: |
|
83 | 83 | if entry.gced or (entry.datarepacked and entry.historyrepacked): |
|
84 | 84 | progress.update(count) |
|
85 | 85 | path = self._getfilepath(entry.filename, entry.node) |
|
86 | 86 | util.tryunlink(path) |
|
87 | 87 | count += 1 |
|
88 | 88 | progress.complete() |
|
89 | 89 | |
|
90 | 90 | # Clean up the repo cache directory. |
|
91 | 91 | self._cleanupdirectory(self._getrepocachepath()) |
|
92 | 92 | |
|
93 | 93 | # BELOW THIS ARE NON-STANDARD APIS |
|
94 | 94 | |
|
95 | 95 | def _cleanupdirectory(self, rootdir): |
|
96 | 96 | """Removes the empty directories and unnecessary files within the root |
|
97 | 97 | directory recursively. Note that this method does not remove the root |
|
98 | 98 | directory itself. """ |
|
99 | 99 | |
|
100 | 100 | oldfiles = set() |
|
101 | 101 | otherfiles = set() |
|
102 | 102 | # osutil.listdir returns stat information which saves some rmdir/listdir |
|
103 | 103 | # syscalls. |
|
104 | 104 | for name, mode in util.osutil.listdir(rootdir): |
|
105 | 105 | if stat.S_ISDIR(mode): |
|
106 | 106 | dirpath = os.path.join(rootdir, name) |
|
107 | 107 | self._cleanupdirectory(dirpath) |
|
108 | 108 | |
|
109 | 109 | # Now that the directory specified by dirpath is potentially |
|
110 | 110 | # empty, try and remove it. |
|
111 | 111 | try: |
|
112 | 112 | os.rmdir(dirpath) |
|
113 | 113 | except OSError: |
|
114 | 114 | pass |
|
115 | 115 | |
|
116 | 116 | elif stat.S_ISREG(mode): |
|
117 | 117 | if name.endswith('_old'): |
|
118 | 118 | oldfiles.add(name[:-4]) |
|
119 | 119 | else: |
|
120 | 120 | otherfiles.add(name) |
|
121 | 121 | |
|
122 | 122 | # Remove the files which end with suffix '_old' and have no |
|
123 | 123 | # corresponding file without the suffix '_old'. See addremotefilelognode |
|
124 | 124 | # method for the generation/purpose of files with '_old' suffix. |
|
125 | 125 | for filename in oldfiles - otherfiles: |
|
126 | 126 | filepath = os.path.join(rootdir, filename + '_old') |
|
127 | 127 | util.tryunlink(filepath) |
|
128 | 128 | |
|
129 | 129 | def _getfiles(self): |
|
130 | 130 | """Return a list of (filename, [node,...]) for all the revisions that |
|
131 | 131 | exist in the store. |
|
132 | 132 | |
|
133 | 133 | This is useful for obtaining a list of all the contents of the store |
|
134 | 134 | when performing a repack to another store, since the store API requires |
|
135 | 135 | name+node keys and not namehash+node keys. |
|
136 | 136 | """ |
|
137 | 137 | existing = {} |
|
138 | 138 | for filenamehash, node in self._listkeys(): |
|
139 | 139 | existing.setdefault(filenamehash, []).append(node) |
|
140 | 140 | |
|
141 | 141 | filenamemap = self._resolvefilenames(existing.keys()) |
|
142 | 142 | |
|
143 | 143 | for filename, sha in filenamemap.iteritems(): |
|
144 | 144 | yield (filename, existing[sha]) |
|
145 | 145 | |
|
146 | 146 | def _resolvefilenames(self, hashes): |
|
147 | 147 | """Given a list of filename hashes that are present in the |
|
148 | 148 | remotefilelog store, return a mapping from filename->hash. |
|
149 | 149 | |
|
150 | 150 | This is useful when converting remotefilelog blobs into other storage |
|
151 | 151 | formats. |
|
152 | 152 | """ |
|
153 | 153 | if not hashes: |
|
154 | 154 | return {} |
|
155 | 155 | |
|
156 | 156 | filenames = {} |
|
157 | 157 | missingfilename = set(hashes) |
|
158 | 158 | |
|
159 | 159 | # Start with a full manifest, since it'll cover the majority of files |
|
160 | 160 | for filename in self.repo['tip'].manifest(): |
|
161 | 161 | sha = hashlib.sha1(filename).digest() |
|
162 | 162 | if sha in missingfilename: |
|
163 | 163 | filenames[filename] = sha |
|
164 | 164 | missingfilename.discard(sha) |
|
165 | 165 | |
|
166 | 166 | # Scan the changelog until we've found every file name |
|
167 | 167 | cl = self.repo.unfiltered().changelog |
|
168 | 168 | for rev in pycompat.xrange(len(cl) - 1, -1, -1): |
|
169 | 169 | if not missingfilename: |
|
170 | 170 | break |
|
171 | 171 | files = cl.readfiles(cl.node(rev)) |
|
172 | 172 | for filename in files: |
|
173 | 173 | sha = hashlib.sha1(filename).digest() |
|
174 | 174 | if sha in missingfilename: |
|
175 | 175 | filenames[filename] = sha |
|
176 | 176 | missingfilename.discard(sha) |
|
177 | 177 | |
|
178 | 178 | return filenames |
|
179 | 179 | |
|
180 | 180 | def _getrepocachepath(self): |
|
181 | 181 | return os.path.join( |
|
182 | 182 | self._path, self._reponame) if self._shared else self._path |
|
183 | 183 | |
|
184 | 184 | def _listkeys(self): |
|
185 | 185 | """List all the remotefilelog keys that exist in the store. |
|
186 | 186 | |
|
187 | 187 | Returns an iterator of (filename hash, filecontent hash) tuples. |
|
188 | 188 | """ |
|
189 | 189 | |
|
190 | 190 | for root, dirs, files in os.walk(self._getrepocachepath()): |
|
191 | 191 | for filename in files: |
|
192 | 192 | if len(filename) != 40: |
|
193 | 193 | continue |
|
194 | 194 | node = filename |
|
195 | 195 | if self._shared: |
|
196 | 196 | # .../1a/85ffda..be21 |
|
197 | 197 | filenamehash = root[-41:-39] + root[-38:] |
|
198 | 198 | else: |
|
199 | 199 | filenamehash = root[-40:] |
|
200 | 200 | yield (bin(filenamehash), bin(node)) |
|
201 | 201 | |
|
202 | 202 | def _getfilepath(self, name, node): |
|
203 | 203 | node = hex(node) |
|
204 | 204 | if self._shared: |
|
205 | 205 | key = shallowutil.getcachekey(self._reponame, name, node) |
|
206 | 206 | else: |
|
207 | 207 | key = shallowutil.getlocalkey(name, node) |
|
208 | 208 | |
|
209 | 209 | return os.path.join(self._path, key) |
|
210 | 210 | |
|
211 | 211 | def _getdata(self, name, node): |
|
212 | 212 | filepath = self._getfilepath(name, node) |
|
213 | 213 | try: |
|
214 | 214 | data = shallowutil.readfile(filepath) |
|
215 | 215 | if self._validatecache and not self._validatedata(data, filepath): |
|
216 | 216 | if self._validatecachelog: |
|
217 | 217 | with open(self._validatecachelog, 'a+') as f: |
|
218 | 218 | f.write("corrupt %s during read\n" % filepath) |
|
219 | 219 | os.rename(filepath, filepath + ".corrupt") |
|
220 | 220 | raise KeyError("corrupt local cache file %s" % filepath) |
|
221 | 221 | except IOError: |
|
222 | 222 | raise KeyError("no file found at %s for %s:%s" % (filepath, name, |
|
223 | 223 | hex(node))) |
|
224 | 224 | |
|
225 | 225 | return data |
|
226 | 226 | |
|
227 | 227 | def addremotefilelognode(self, name, node, data): |
|
228 | 228 | filepath = self._getfilepath(name, node) |
|
229 | 229 | |
|
230 | 230 | oldumask = os.umask(0o002) |
|
231 | 231 | try: |
|
232 | 232 | # if this node already exists, save the old version for |
|
233 | 233 | # recovery/debugging purposes. |
|
234 | 234 | if os.path.exists(filepath): |
|
235 | 235 | newfilename = filepath + '_old' |
|
236 | 236 | # newfilename can be read-only and shutil.copy will fail. |
|
237 | 237 | # Delete newfilename to avoid it |
|
238 | 238 | if os.path.exists(newfilename): |
|
239 | 239 | shallowutil.unlinkfile(newfilename) |
|
240 | 240 | shutil.copy(filepath, newfilename) |
|
241 | 241 | |
|
242 | 242 | shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath)) |
|
243 | 243 | shallowutil.writefile(filepath, data, readonly=True) |
|
244 | 244 | |
|
245 | 245 | if self._validatecache: |
|
246 | 246 | if not self._validatekey(filepath, 'write'): |
|
247 | 247 | raise error.Abort(_("local cache write was corrupted %s") % |
|
248 | 248 | filepath) |
|
249 | 249 | finally: |
|
250 | 250 | os.umask(oldumask) |
|
251 | 251 | |
|
252 | 252 | def markrepo(self, path): |
|
253 | 253 | """Call this to add the given repo path to the store's list of |
|
254 | 254 | repositories that are using it. This is useful later when doing garbage |
|
254 | 254 | collection, since it allows us to inspect the repos to see what nodes |
|
256 | 256 | they want to be kept alive in the store. |
|
257 | 257 | """ |
|
258 | 258 | repospath = os.path.join(self._path, "repos") |
|
259 | 259 | with open(repospath, 'ab') as reposfile: |
|
260 | 260 | reposfile.write(os.path.dirname(path) + "\n") |
|
261 | 261 | |
|
262 | 262 | repospathstat = os.stat(repospath) |
|
263 | 263 | if repospathstat.st_uid == self._uid: |
|
264 | 264 | os.chmod(repospath, 0o0664) |
|
265 | 265 | |
|
266 | 266 | def _validatekey(self, path, action): |
|
267 | 267 | with open(path, 'rb') as f: |
|
268 | 268 | data = f.read() |
|
269 | 269 | |
|
270 | 270 | if self._validatedata(data, path): |
|
271 | 271 | return True |
|
272 | 272 | |
|
273 | 273 | if self._validatecachelog: |
|
274 | 274 | with open(self._validatecachelog, 'ab+') as f: |
|
275 | 275 | f.write("corrupt %s during %s\n" % (path, action)) |
|
276 | 276 | |
|
277 | 277 | os.rename(path, path + ".corrupt") |
|
278 | 278 | return False |
|
279 | 279 | |
|
280 | 280 | def _validatedata(self, data, path): |
|
281 | 281 | try: |
|
282 | 282 | if len(data) > 0: |
|
283 | 283 | # see remotefilelogserver.createfileblob for the format |
|
284 | 284 | offset, size, flags = shallowutil.parsesizeflags(data) |
|
285 | 285 | if len(data) <= size: |
|
286 | 286 | # it is truncated |
|
287 | 287 | return False |
|
288 | 288 | |
|
289 | 289 | # extract the node from the metadata |
|
290 | 290 | offset += size |
|
291 | 291 | datanode = data[offset:offset + 20] |
|
292 | 292 | |
|
293 | 293 | # and compare against the path |
|
294 | 294 | if os.path.basename(path) == hex(datanode): |
|
295 | 295 | # Content matches the intended path |
|
296 | 296 | return True |
|
297 | 297 | return False |
|
298 | 298 | except (ValueError, RuntimeError): |
|
299 | 299 | pass |
|
300 | 300 | |
|
301 | 301 | return False |
|
302 | 302 | |
|
303 | 303 | def gc(self, keepkeys): |
|
304 | 304 | ui = self.ui |
|
305 | 305 | cachepath = self._path |
|
306 | 306 | |
|
307 | 307 | # prune cache |
|
308 | 308 | import Queue |
|
309 | 309 | queue = Queue.PriorityQueue() |
|
310 | 310 | originalsize = 0 |
|
311 | 311 | size = 0 |
|
312 | 312 | count = 0 |
|
313 | 313 | removed = 0 |
|
314 | 314 | |
|
315 | 315 | # keep files newer than a day even if they aren't needed |
|
316 | 316 | limit = time.time() - (60 * 60 * 24) |
|
317 | 317 | |
|
318 | 318 | progress = ui.makeprogress(_("removing unnecessary files"), |
|
319 | 319 | unit="files") |
|
320 | 320 | progress.update(0) |
|
321 | 321 | for root, dirs, files in os.walk(cachepath): |
|
322 | 322 | for file in files: |
|
323 | 323 | if file == 'repos': |
|
324 | 324 | continue |
|
325 | 325 | |
|
326 | 326 | # Don't delete pack files |
|
327 | 327 | if '/packs/' in root: |
|
328 | 328 | continue |
|
329 | 329 | |
|
330 | 330 | progress.update(count) |
|
331 | 331 | path = os.path.join(root, file) |
|
332 | 332 | key = os.path.relpath(path, cachepath) |
|
333 | 333 | count += 1 |
|
334 | 334 | try: |
|
335 | 335 | pathstat = os.stat(path) |
|
336 | 336 | except OSError as e: |
|
337 | 337 | # errno.ENOENT = no such file or directory |
|
338 | 338 | if e.errno != errno.ENOENT: |
|
339 | 339 | raise |
|
340 | 340 | msg = _("warning: file %s was removed by another process\n") |
|
341 | 341 | ui.warn(msg % path) |
|
342 | 342 | continue |
|
343 | 343 | |
|
344 | 344 | originalsize += pathstat.st_size |
|
345 | 345 | |
|
346 | 346 | if key in keepkeys or pathstat.st_atime > limit: |
|
347 | 347 | queue.put((pathstat.st_atime, path, pathstat)) |
|
348 | 348 | size += pathstat.st_size |
|
349 | 349 | else: |
|
350 | 350 | try: |
|
351 | 351 | shallowutil.unlinkfile(path) |
|
352 | 352 | except OSError as e: |
|
353 | 353 | # errno.ENOENT = no such file or directory |
|
354 | 354 | if e.errno != errno.ENOENT: |
|
355 | 355 | raise |
|
356 | 356 | msg = _("warning: file %s was removed by another " |
|
357 | 357 | "process\n") |
|
358 | 358 | ui.warn(msg % path) |
|
359 | 359 | continue |
|
360 | 360 | removed += 1 |
|
361 | 361 | progress.complete() |
|
362 | 362 | |
|
363 | 363 | # remove oldest files until under limit |
|
364 | 364 | limit = ui.configbytes("remotefilelog", "cachelimit") |
|
365 | 365 | if size > limit: |
|
366 | 366 | excess = size - limit |
|
367 | 367 | progress = ui.makeprogress(_("enforcing cache limit"), unit="bytes", |
|
368 | 368 | total=excess) |
|
369 | 369 | removedexcess = 0 |
|
370 | 370 | while queue and size > limit and size > 0: |
|
371 | 371 | progress.update(removedexcess) |
|
372 | 372 | atime, oldpath, oldpathstat = queue.get() |
|
373 | 373 | try: |
|
374 | 374 | shallowutil.unlinkfile(oldpath) |
|
375 | 375 | except OSError as e: |
|
376 | 376 | # errno.ENOENT = no such file or directory |
|
377 | 377 | if e.errno != errno.ENOENT: |
|
378 | 378 | raise |
|
379 | 379 | msg = _("warning: file %s was removed by another process\n") |
|
380 | 380 | ui.warn(msg % oldpath) |
|
381 | 381 | size -= oldpathstat.st_size |
|
382 | 382 | removed += 1 |
|
383 | 383 | removedexcess += oldpathstat.st_size |
|
384 | 384 | progress.complete() |
|
385 | 385 | |
|
386 |  | ui.status(_("finished: removed %
|
 | 386 | ui.status(_("finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n") |
|
387 | 387 | % (removed, count, |
|
388 | 388 | float(originalsize) / 1024.0 / 1024.0 / 1024.0, |
|
389 | 389 | float(size) / 1024.0 / 1024.0 / 1024.0)) |
|
390 | 390 | |
|
391 | 391 | class baseunionstore(object): |
|
392 | 392 | def __init__(self, *args, **kwargs): |
|
393 | 393 | # If one of the functions that iterates all of the stores is about to |
|
394 | 394 | # throw a KeyError, try this many times with a full refresh between |
|
395 | 395 | # attempts. A repack operation may have moved data from one store to |
|
396 | 396 | # another while we were running. |
|
397 | 397 | self.numattempts = kwargs.get(r'numretries', 0) + 1 |
|
398 | 398 | # If not-None, call this function on every retry and if the attempts are |
|
399 | 399 | # exhausted. |
|
400 | 400 | self.retrylog = kwargs.get(r'retrylog', None) |
|
401 | 401 | |
|
402 | 402 | def markforrefresh(self): |
|
403 | 403 | for store in self.stores: |
|
404 | 404 | if util.safehasattr(store, 'markforrefresh'): |
|
405 | 405 | store.markforrefresh() |
|
406 | 406 | |
|
407 | 407 | @staticmethod |
|
408 | 408 | def retriable(fn): |
|
409 | 409 | def noop(*args): |
|
410 | 410 | pass |
|
411 | 411 | def wrapped(self, *args, **kwargs): |
|
412 | 412 | retrylog = self.retrylog or noop |
|
413 | 413 | funcname = fn.__name__ |
|
414 | 414 | for i in pycompat.xrange(self.numattempts): |
|
415 | 415 | if i > 0: |
|
416 | 416 | retrylog('re-attempting (n=%d) %s\n' % (i, funcname)) |
|
417 | 417 | self.markforrefresh() |
|
418 | 418 | try: |
|
419 | 419 | return fn(self, *args, **kwargs) |
|
420 | 420 | except KeyError: |
|
421 | 421 | pass |
|
422 | 422 | # retries exhausted |
|
423 | 423 | retrylog('retries exhausted in %s, raising KeyError\n' % |
|
424 | 424 | pycompat.sysbytes(funcname)) |
|
425 | 425 | raise |
|
426 | 426 | return wrapped |
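
The baseunionstore.retriable decorator above retries a store lookup after asking the stores to refresh, since a concurrent repack may have moved data between stores while the operation was running. A minimal standalone sketch of that retry-with-refresh pattern follows; the refresh() hook and log callable are illustrative names, not part of the extension's API.

    # Sketch only: generic retry-with-refresh decorator in the spirit of
    # baseunionstore.retriable. `self.refresh()` is a hypothetical hook that
    # re-scans the backing stores before the next attempt.
    def retriable(attempts=3, log=None):
        def decorator(fn):
            def wrapped(self, *args, **kwargs):
                for i in range(attempts):
                    if i > 0:
                        if log:
                            log('re-attempting (n=%d) %s\n' % (i, fn.__name__))
                        self.refresh()
                    try:
                        return fn(self, *args, **kwargs)
                    except KeyError:
                        pass  # the key may show up after a refresh
                # retries exhausted
                raise KeyError('%s: key not found after %d attempts'
                               % (fn.__name__, attempts))
            return wrapped
        return decorator

Applied to a method that raises KeyError when a blob is missing, the wrapper swallows the first failures, refreshes, and only propagates an error once all attempts are used, which is the same shape as the retriable implementation under review.
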
@@ -1,378 +1,378 b''
|
1 | 1 | # debugcommands.py - debug logic for remotefilelog |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import hashlib |
|
10 | 10 | import os |
|
11 | 11 | import zlib |
|
12 | 12 | |
|
13 | 13 | from mercurial.node import bin, hex, nullid, short |
|
14 | 14 | from mercurial.i18n import _ |
|
15 | 15 | from mercurial import ( |
|
16 | 16 | error, |
|
17 | 17 | filelog, |
|
18 | 18 | node as nodemod, |
|
19 | 19 | revlog, |
|
20 | 20 | ) |
|
21 | 21 | from . import ( |
|
22 | 22 | constants, |
|
23 | 23 | datapack, |
|
24 | 24 | extutil, |
|
25 | 25 | fileserverclient, |
|
26 | 26 | historypack, |
|
27 | 27 | repack, |
|
28 | 28 | shallowutil, |
|
29 | 29 | ) |
|
30 | 30 | |
|
31 | 31 | def debugremotefilelog(ui, path, **opts): |
|
32 | 32 | decompress = opts.get(r'decompress') |
|
33 | 33 | |
|
34 | 34 | size, firstnode, mapping = parsefileblob(path, decompress) |
|
35 | 35 | |
|
36 | 36 | ui.status(_("size: %d bytes\n") % (size)) |
|
37 | 37 | ui.status(_("path: %s \n") % (path)) |
|
38 | 38 | ui.status(_("key: %s \n") % (short(firstnode))) |
|
39 | 39 | ui.status(_("\n")) |
|
40 | 40 | ui.status(_("%12s => %12s %13s %13s %12s\n") % |
|
41 | 41 | ("node", "p1", "p2", "linknode", "copyfrom")) |
|
42 | 42 | |
|
43 | 43 | queue = [firstnode] |
|
44 | 44 | while queue: |
|
45 | 45 | node = queue.pop(0) |
|
46 | 46 | p1, p2, linknode, copyfrom = mapping[node] |
|
47 | 47 | ui.status(_("%s => %s %s %s %s\n") % |
|
48 | 48 | (short(node), short(p1), short(p2), short(linknode), copyfrom)) |
|
49 | 49 | if p1 != nullid: |
|
50 | 50 | queue.append(p1) |
|
51 | 51 | if p2 != nullid: |
|
52 | 52 | queue.append(p2) |
|
53 | 53 | |
|
54 | 54 | def buildtemprevlog(repo, file): |
|
55 | 55 | # get filename key |
|
56 | 56 | filekey = nodemod.hex(hashlib.sha1(file).digest()) |
|
57 | 57 | filedir = os.path.join(repo.path, 'store/data', filekey) |
|
58 | 58 | |
|
59 | 59 | # sort all entries based on linkrev |
|
60 | 60 | fctxs = [] |
|
61 | 61 | for filenode in os.listdir(filedir): |
|
62 | 62 | if '_old' not in filenode: |
|
63 | 63 | fctxs.append(repo.filectx(file, fileid=bin(filenode))) |
|
64 | 64 | |
|
65 | 65 | fctxs = sorted(fctxs, key=lambda x: x.linkrev()) |
|
66 | 66 | |
|
67 | 67 | # add to revlog |
|
68 | 68 | temppath = repo.sjoin('data/temprevlog.i') |
|
69 | 69 | if os.path.exists(temppath): |
|
70 | 70 | os.remove(temppath) |
|
71 | 71 | r = filelog.filelog(repo.svfs, 'temprevlog') |
|
72 | 72 | |
|
73 | 73 | class faket(object): |
|
74 | 74 | def add(self, a, b, c): |
|
75 | 75 | pass |
|
76 | 76 | t = faket() |
|
77 | 77 | for fctx in fctxs: |
|
78 | 78 | if fctx.node() not in repo: |
|
79 | 79 | continue |
|
80 | 80 | |
|
81 | 81 | p = fctx.filelog().parents(fctx.filenode()) |
|
82 | 82 | meta = {} |
|
83 | 83 | if fctx.renamed(): |
|
84 | 84 | meta['copy'] = fctx.renamed()[0] |
|
85 | 85 | meta['copyrev'] = hex(fctx.renamed()[1]) |
|
86 | 86 | |
|
87 | 87 | r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1]) |
|
88 | 88 | |
|
89 | 89 | return r |
|
90 | 90 | |
|
91 | 91 | def debugindex(orig, ui, repo, file_=None, **opts): |
|
92 | 92 | """dump the contents of an index file""" |
|
93 | 93 | if (opts.get(r'changelog') or |
|
94 | 94 | opts.get(r'manifest') or |
|
95 | 95 | opts.get(r'dir') or |
|
96 | 96 | not shallowutil.isenabled(repo) or |
|
97 | 97 | not repo.shallowmatch(file_)): |
|
98 | 98 | return orig(ui, repo, file_, **opts) |
|
99 | 99 | |
|
100 | 100 | r = buildtemprevlog(repo, file_) |
|
101 | 101 | |
|
102 | 102 | # debugindex like normal |
|
103 | 103 | format = opts.get('format', 0) |
|
104 | 104 | if format not in (0, 1): |
|
105 | 105 | raise error.Abort(_("unknown format %d") % format) |
|
106 | 106 | |
|
107 | 107 | generaldelta = r.version & revlog.FLAG_GENERALDELTA |
|
108 | 108 | if generaldelta: |
|
109 | 109 | basehdr = ' delta' |
|
110 | 110 | else: |
|
111 | 111 | basehdr = ' base' |
|
112 | 112 | |
|
113 | 113 | if format == 0: |
|
114 | 114 | ui.write((" rev offset length " + basehdr + " linkrev" |
|
115 | 115 | " nodeid p1 p2\n")) |
|
116 | 116 | elif format == 1: |
|
117 | 117 | ui.write((" rev flag offset length" |
|
118 | 118 | " size " + basehdr + " link p1 p2" |
|
119 | 119 | " nodeid\n")) |
|
120 | 120 | |
|
121 | 121 | for i in r: |
|
122 | 122 | node = r.node(i) |
|
123 | 123 | if generaldelta: |
|
124 | 124 | base = r.deltaparent(i) |
|
125 | 125 | else: |
|
126 | 126 | base = r.chainbase(i) |
|
127 | 127 | if format == 0: |
|
128 | 128 | try: |
|
129 | 129 | pp = r.parents(node) |
|
130 | 130 | except Exception: |
|
131 | 131 | pp = [nullid, nullid] |
|
132 | 132 | ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % ( |
|
133 | 133 | i, r.start(i), r.length(i), base, r.linkrev(i), |
|
134 | 134 | short(node), short(pp[0]), short(pp[1]))) |
|
135 | 135 | elif format == 1: |
|
136 | 136 | pr = r.parentrevs(i) |
|
137 | 137 | ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % ( |
|
138 | 138 | i, r.flags(i), r.start(i), r.length(i), r.rawsize(i), |
|
139 | 139 | base, r.linkrev(i), pr[0], pr[1], short(node))) |
|
140 | 140 | |
|
141 | 141 | def debugindexdot(orig, ui, repo, file_): |
|
142 | 142 | """dump an index DAG as a graphviz dot file""" |
|
143 | 143 | if not shallowutil.isenabled(repo): |
|
144 | 144 | return orig(ui, repo, file_) |
|
145 | 145 | |
|
146 | 146 | r = buildtemprevlog(repo, os.path.basename(file_)[:-2]) |
|
147 | 147 | |
|
148 | 148 | ui.write(("digraph G {\n")) |
|
149 | 149 | for i in r: |
|
150 | 150 | node = r.node(i) |
|
151 | 151 | pp = r.parents(node) |
|
152 | 152 | ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i)) |
|
153 | 153 | if pp[1] != nullid: |
|
154 | 154 | ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i)) |
|
155 | 155 | ui.write("}\n") |
|
156 | 156 | |
|
157 | 157 | def verifyremotefilelog(ui, path, **opts): |
|
158 | 158 | decompress = opts.get(r'decompress') |
|
159 | 159 | |
|
160 | 160 | for root, dirs, files in os.walk(path): |
|
161 | 161 | for file in files: |
|
162 | 162 | if file == "repos": |
|
163 | 163 | continue |
|
164 | 164 | filepath = os.path.join(root, file) |
|
165 | 165 | size, firstnode, mapping = parsefileblob(filepath, decompress) |
|
166 | 166 | for p1, p2, linknode, copyfrom in mapping.itervalues(): |
|
167 | 167 | if linknode == nullid: |
|
168 | 168 | actualpath = os.path.relpath(root, path) |
|
169 | 169 | key = fileserverclient.getcachekey("reponame", actualpath, |
|
170 | 170 | file) |
|
171 | 171 | ui.status("%s %s\n" % (key, os.path.relpath(filepath, |
|
172 | 172 | path))) |
|
173 | 173 | |
|
174 | 174 | def _decompressblob(raw): |
|
175 | 175 | return zlib.decompress(raw) |
|
176 | 176 | |
|
177 | 177 | def parsefileblob(path, decompress): |
|
178 | 178 | raw = None |
|
179 | 179 | f = open(path, "rb") |
|
180 | 180 | try: |
|
181 | 181 | raw = f.read() |
|
182 | 182 | finally: |
|
183 | 183 | f.close() |
|
184 | 184 | |
|
185 | 185 | if decompress: |
|
186 | 186 | raw = _decompressblob(raw) |
|
187 | 187 | |
|
188 | 188 | offset, size, flags = shallowutil.parsesizeflags(raw) |
|
189 | 189 | start = offset + size |
|
190 | 190 | |
|
191 | 191 | firstnode = None |
|
192 | 192 | |
|
193 | 193 | mapping = {} |
|
194 | 194 | while start < len(raw): |
|
195 | 195 | divider = raw.index('\0', start + 80) |
|
196 | 196 | |
|
197 | 197 | currentnode = raw[start:(start + 20)] |
|
198 | 198 | if not firstnode: |
|
199 | 199 | firstnode = currentnode |
|
200 | 200 | |
|
201 | 201 | p1 = raw[(start + 20):(start + 40)] |
|
202 | 202 | p2 = raw[(start + 40):(start + 60)] |
|
203 | 203 | linknode = raw[(start + 60):(start + 80)] |
|
204 | 204 | copyfrom = raw[(start + 80):divider] |
|
205 | 205 | |
|
206 | 206 | mapping[currentnode] = (p1, p2, linknode, copyfrom) |
|
207 | 207 | start = divider + 1 |
|
208 | 208 | |
|
209 | 209 | return size, firstnode, mapping |
|
210 | 210 | |
|
211 | 211 | def debugdatapack(ui, *paths, **opts): |
|
212 | 212 | for path in paths: |
|
213 | 213 | if '.data' in path: |
|
214 | 214 | path = path[:path.index('.data')] |
|
215 | 215 | ui.write("%s:\n" % path) |
|
216 | 216 | dpack = datapack.datapack(path) |
|
217 | 217 | node = opts.get(r'node') |
|
218 | 218 | if node: |
|
219 | 219 | deltachain = dpack.getdeltachain('', bin(node)) |
|
220 | 220 | dumpdeltachain(ui, deltachain, **opts) |
|
221 | 221 | return |
|
222 | 222 | |
|
223 | 223 | if opts.get(r'long'): |
|
224 | 224 | hashformatter = hex |
|
225 | 225 | hashlen = 42 |
|
226 | 226 | else: |
|
227 | 227 | hashformatter = short |
|
228 | 228 | hashlen = 14 |
|
229 | 229 | |
|
230 | 230 | lastfilename = None |
|
231 | 231 | totaldeltasize = 0 |
|
232 | 232 | totalblobsize = 0 |
|
233 | 233 | def printtotals(): |
|
234 | 234 | if lastfilename is not None: |
|
235 | 235 | ui.write("\n") |
|
236 | 236 | if not totaldeltasize or not totalblobsize: |
|
237 | 237 | return |
|
238 | 238 | difference = totalblobsize - totaldeltasize |
|
239 | 239 | deltastr = "%0.1f%% %s" % ( |
|
240 | 240 | (100.0 * abs(difference) / totalblobsize), |
|
241 | 241 | ("smaller" if difference > 0 else "bigger")) |
|
242 | 242 | |
|
243 | 243 | ui.write(("Total:%s%s %s (%s)\n") % ( |
|
244 | 244 | "".ljust(2 * hashlen - len("Total:")), |
|
245 |  |
|
246 |  |
|
 | 245 | ('%d' % totaldeltasize).ljust(12), |
|
 | 246 | ('%d' % totalblobsize).ljust(9), |
|
247 | 247 | deltastr |
|
248 | 248 | )) |
|
249 | 249 | |
|
250 | 250 | bases = {} |
|
251 | 251 | nodes = set() |
|
252 | 252 | failures = 0 |
|
253 | 253 | for filename, node, deltabase, deltalen in dpack.iterentries(): |
|
254 | 254 | bases[node] = deltabase |
|
255 | 255 | if node in nodes: |
|
256 | 256 | ui.write(("Bad entry: %s appears twice\n" % short(node))) |
|
257 | 257 | failures += 1 |
|
258 | 258 | nodes.add(node) |
|
259 | 259 | if filename != lastfilename: |
|
260 | 260 | printtotals() |
|
261 | 261 | name = '(empty name)' if filename == '' else filename |
|
262 | 262 | ui.write("%s:\n" % name) |
|
263 | 263 | ui.write("%s%s%s%s\n" % ( |
|
264 | 264 | "Node".ljust(hashlen), |
|
265 | 265 | "Delta Base".ljust(hashlen), |
|
266 | 266 | "Delta Length".ljust(14), |
|
267 | 267 | "Blob Size".ljust(9))) |
|
268 | 268 | lastfilename = filename |
|
269 | 269 | totalblobsize = 0 |
|
270 | 270 | totaldeltasize = 0 |
|
271 | 271 | |
|
272 | 272 | # Metadata could be missing, in which case it will be an empty dict. |
|
273 | 273 | meta = dpack.getmeta(filename, node) |
|
274 | 274 | if constants.METAKEYSIZE in meta: |
|
275 | 275 | blobsize = meta[constants.METAKEYSIZE] |
|
276 | 276 | totaldeltasize += deltalen |
|
277 | 277 | totalblobsize += blobsize |
|
278 | 278 | else: |
|
279 | 279 | blobsize = "(missing)" |
|
280 |  | ui.write("%s %s %s%
|
 | 280 | ui.write("%s %s %s%d\n" % ( |
|
281 | 281 | hashformatter(node), |
|
282 | 282 | hashformatter(deltabase), |
|
283 |  |
|
 | 283 | ('%d' % deltalen).ljust(14), |
|
284 | 284 | blobsize)) |
|
285 | 285 | |
|
286 | 286 | if filename is not None: |
|
287 | 287 | printtotals() |
|
288 | 288 | |
|
289 | 289 | failures += _sanitycheck(ui, set(nodes), bases) |
|
290 | 290 | if failures > 1: |
|
291 | 291 | ui.warn(("%d failures\n" % failures)) |
|
292 | 292 | return 1 |
|
293 | 293 | |
|
294 | 294 | def _sanitycheck(ui, nodes, bases): |
|
295 | 295 | """ |
|
296 | 296 | Does some basic sanity checking on a packfiles with ``nodes`` ``bases`` (a |
|
297 | 297 | mapping of node->base): |
|
298 | 298 | |
|
299 | 299 | - Each deltabase must itself be a node elsewhere in the pack |
|
300 | 300 | - There must be no cycles |
|
301 | 301 | """ |
|
302 | 302 | failures = 0 |
|
303 | 303 | for node in nodes: |
|
304 | 304 | seen = set() |
|
305 | 305 | current = node |
|
306 | 306 | deltabase = bases[current] |
|
307 | 307 | |
|
308 | 308 | while deltabase != nullid: |
|
309 | 309 | if deltabase not in nodes: |
|
310 | 310 | ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" % |
|
311 | 311 | (short(node), short(deltabase)))) |
|
312 | 312 | failures += 1 |
|
313 | 313 | break |
|
314 | 314 | |
|
315 | 315 | if deltabase in seen: |
|
316 | 316 | ui.warn(("Bad entry: %s has a cycle (at %s)\n" % |
|
317 | 317 | (short(node), short(deltabase)))) |
|
318 | 318 | failures += 1 |
|
319 | 319 | break |
|
320 | 320 | |
|
321 | 321 | current = deltabase |
|
322 | 322 | seen.add(current) |
|
323 | 323 | deltabase = bases[current] |
|
324 | 324 | # Since ``node`` begins a valid chain, reset/memoize its base to nullid |
|
325 | 325 | # so we don't traverse it again. |
|
326 | 326 | bases[node] = nullid |
|
327 | 327 | return failures |
|
328 | 328 | |
|
329 | 329 | def dumpdeltachain(ui, deltachain, **opts): |
|
330 | 330 | hashformatter = hex |
|
331 | 331 | hashlen = 40 |
|
332 | 332 | |
|
333 | 333 | lastfilename = None |
|
334 | 334 | for filename, node, filename, deltabasenode, delta in deltachain: |
|
335 | 335 | if filename != lastfilename: |
|
336 | 336 | ui.write("\n%s\n" % filename) |
|
337 | 337 | lastfilename = filename |
|
338 | 338 | ui.write("%s %s %s %s\n" % ( |
|
339 | 339 | "Node".ljust(hashlen), |
|
340 | 340 | "Delta Base".ljust(hashlen), |
|
341 | 341 | "Delta SHA1".ljust(hashlen), |
|
342 | 342 | "Delta Length".ljust(6), |
|
343 | 343 | )) |
|
344 | 344 | |
|
345 |
ui.write("%s %s %s % |
|
|
345 | ui.write("%s %s %s %d\n" % ( | |
|
346 | 346 | hashformatter(node), |
|
347 | 347 | hashformatter(deltabasenode), |
|
348 | 348 | nodemod.hex(hashlib.sha1(delta).digest()), |
|
349 | 349 | len(delta))) |
|
350 | 350 | |
|
351 | 351 | def debughistorypack(ui, path): |
|
352 | 352 | if '.hist' in path: |
|
353 | 353 | path = path[:path.index('.hist')] |
|
354 | 354 | hpack = historypack.historypack(path) |
|
355 | 355 | |
|
356 | 356 | lastfilename = None |
|
357 | 357 | for entry in hpack.iterentries(): |
|
358 | 358 | filename, node, p1node, p2node, linknode, copyfrom = entry |
|
359 | 359 | if filename != lastfilename: |
|
360 | 360 | ui.write("\n%s\n" % filename) |
|
361 | 361 | ui.write("%s%s%s%s%s\n" % ( |
|
362 | 362 | "Node".ljust(14), |
|
363 | 363 | "P1 Node".ljust(14), |
|
364 | 364 | "P2 Node".ljust(14), |
|
365 | 365 | "Link Node".ljust(14), |
|
366 | 366 | "Copy From")) |
|
367 | 367 | lastfilename = filename |
|
368 | 368 | ui.write("%s %s %s %s %s\n" % (short(node), short(p1node), |
|
369 | 369 | short(p2node), short(linknode), copyfrom)) |
|
370 | 370 | |
|
371 | 371 | def debugwaitonrepack(repo): |
|
372 | 372 | with extutil.flock(repack.repacklockvfs(repo).join('repacklock'), ''): |
|
373 | 373 | return |
|
374 | 374 | |
|
375 | 375 | def debugwaitonprefetch(repo): |
|
376 | 376 | with repo._lock(repo.svfs, "prefetchlock", True, None, |
|
377 | 377 | None, _('prefetching in %s') % repo.origroot): |
|
378 | 378 | pass |