@@ -1,784 +1,784 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import (
    hex,
    nullid,
)
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue
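
# Illustration (not part of this module): ``lfstatus`` temporarily flips the
# ``lfstatus`` flag on a repo object so that status-related code reports
# largefile names instead of their standins.  A minimal sketch, assuming
# ``repo`` is a largefiles-enabled localrepository:
#
#     with lfstatus(repo):
#         st = repo.status()  # largefile names, not .hglf/ standins
#     # afterwards repo.lfstatus is restored to its previous value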

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate[f] != b'?'
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None
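
# Illustration (not part of this module) of how the three standin helpers
# above relate, for a hypothetical largefile tracked as ``data/big.bin``:
#
#     standin(b'data/big.bin')            -> b'.hglf/data/big.bin'
#     isstandin(b'.hglf/data/big.bin')    -> True
#     splitstandin(b'.hglf/data/big.bin') -> b'data/big.bin'
#     splitstandin(b'data/big.bin')       -> None  (not under the standin dir)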
428 |
|
428 | |||
429 |
|
429 | |||
430 | def updatestandin(repo, lfile, standin): |
|
430 | def updatestandin(repo, lfile, standin): | |
431 | """Re-calculate hash value of lfile and write it into standin |
|
431 | """Re-calculate hash value of lfile and write it into standin | |
432 |
|
432 | |||
433 | This assumes that "lfutil.standin(lfile) == standin", for efficiency. |
|
433 | This assumes that "lfutil.standin(lfile) == standin", for efficiency. | |
434 | """ |
|
434 | """ | |
435 | file = repo.wjoin(lfile) |
|
435 | file = repo.wjoin(lfile) | |
436 | if repo.wvfs.exists(lfile): |
|
436 | if repo.wvfs.exists(lfile): | |
437 | hash = hashfile(file) |
|
437 | hash = hashfile(file) | |
438 | executable = getexecutable(file) |
|
438 | executable = getexecutable(file) | |
439 | writestandin(repo, standin, hash, executable) |
|
439 | writestandin(repo, standin, hash, executable) | |
440 | else: |
|
440 | else: | |
441 | raise error.Abort(_(b'%s: file not found!') % lfile) |
|
441 | raise error.Abort(_(b'%s: file not found!') % lfile) | |
442 |
|
442 | |||
443 |
|
443 | |||
444 | def readasstandin(fctx): |
|
444 | def readasstandin(fctx): | |
445 | """read hex hash from given filectx of standin file |
|
445 | """read hex hash from given filectx of standin file | |
446 |
|
446 | |||
447 | This encapsulates how "standin" data is stored into storage layer.""" |
|
447 | This encapsulates how "standin" data is stored into storage layer.""" | |
448 | return fctx.data().strip() |
|
448 | return fctx.data().strip() | |
449 |
|
449 | |||
450 |
|
450 | |||
451 | def writestandin(repo, standin, hash, executable): |
|
451 | def writestandin(repo, standin, hash, executable): | |
452 | '''write hash to <repo.root>/<standin>''' |
|
452 | '''write hash to <repo.root>/<standin>''' | |
453 | repo.wwrite(standin, hash + b'\n', executable and b'x' or b'') |
|
453 | repo.wwrite(standin, hash + b'\n', executable and b'x' or b'') | |
454 |
|
454 | |||
455 |
|
455 | |||
456 | def copyandhash(instream, outfile): |
|
456 | def copyandhash(instream, outfile): | |
457 | """Read bytes from instream (iterable) and write them to outfile, |
|
457 | """Read bytes from instream (iterable) and write them to outfile, | |
458 | computing the SHA-1 hash of the data along the way. Return the hash.""" |
|
458 | computing the SHA-1 hash of the data along the way. Return the hash.""" | |
459 | hasher = hashutil.sha1(b'') |
|
459 | hasher = hashutil.sha1(b'') | |
460 | for data in instream: |
|
460 | for data in instream: | |
461 | hasher.update(data) |
|
461 | hasher.update(data) | |
462 | outfile.write(data) |
|
462 | outfile.write(data) | |
463 | return hex(hasher.digest()) |
|
463 | return hex(hasher.digest()) | |
464 |
|
464 | |||
465 |
|
465 | |||
466 | def hashfile(file): |
|
466 | def hashfile(file): | |
467 | if not os.path.exists(file): |
|
467 | if not os.path.exists(file): | |
468 | return b'' |
|
468 | return b'' | |
469 | with open(file, b'rb') as fd: |
|
469 | with open(file, b'rb') as fd: | |
470 | return hexsha1(fd) |
|
470 | return hexsha1(fd) | |
471 |
|
471 | |||
472 |
|
472 | |||
473 | def getexecutable(filename): |
|
473 | def getexecutable(filename): | |
474 | mode = os.stat(filename).st_mode |
|
474 | mode = os.stat(filename).st_mode | |
475 | return ( |
|
475 | return ( | |
476 | (mode & stat.S_IXUSR) |
|
476 | (mode & stat.S_IXUSR) | |
477 | and (mode & stat.S_IXGRP) |
|
477 | and (mode & stat.S_IXGRP) | |
478 | and (mode & stat.S_IXOTH) |
|
478 | and (mode & stat.S_IXOTH) | |
479 | ) |
|
479 | ) | |
480 |
|
480 | |||
481 |
|
481 | |||
482 | def urljoin(first, second, *arg): |
|
482 | def urljoin(first, second, *arg): | |
483 | def join(left, right): |
|
483 | def join(left, right): | |
484 | if not left.endswith(b'/'): |
|
484 | if not left.endswith(b'/'): | |
485 | left += b'/' |
|
485 | left += b'/' | |
486 | if right.startswith(b'/'): |
|
486 | if right.startswith(b'/'): | |
487 | right = right[1:] |
|
487 | right = right[1:] | |
488 | return left + right |
|
488 | return left + right | |
489 |
|
489 | |||
490 | url = join(first, second) |
|
490 | url = join(first, second) | |
491 | for a in arg: |
|
491 | for a in arg: | |
492 | url = join(url, a) |
|
492 | url = join(url, a) | |
493 | return url |
|
493 | return url | |
494 |
|
494 | |||
495 |
|
495 | |||
496 | def hexsha1(fileobj): |
|
496 | def hexsha1(fileobj): | |
497 | """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like |
|
497 | """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like | |
498 | object data""" |
|
498 | object data""" | |
499 | h = hashutil.sha1() |
|
499 | h = hashutil.sha1() | |
500 | for chunk in util.filechunkiter(fileobj): |
|
500 | for chunk in util.filechunkiter(fileobj): | |
501 | h.update(chunk) |
|
501 | h.update(chunk) | |
502 | return hex(h.digest()) |
|
502 | return hex(h.digest()) | |
503 |
|
503 | |||
504 |
|
504 | |||
505 | def httpsendfile(ui, filename): |
|
505 | def httpsendfile(ui, filename): | |
506 | return httpconnection.httpsendfile(ui, filename, b'rb') |
|
506 | return httpconnection.httpsendfile(ui, filename, b'rb') | |
507 |
|
507 | |||
508 |
|
508 | |||
509 | def unixpath(path): |
|
509 | def unixpath(path): | |
510 | '''Return a version of path normalized for use with the lfdirstate.''' |
|
510 | '''Return a version of path normalized for use with the lfdirstate.''' | |
511 | return util.pconvert(os.path.normpath(path)) |
|
511 | return util.pconvert(os.path.normpath(path)) | |
512 |
|
512 | |||
513 |
|
513 | |||
514 | def islfilesrepo(repo): |
|
514 | def islfilesrepo(repo): | |
515 | '''Return true if the repo is a largefile repo.''' |
|
515 | '''Return true if the repo is a largefile repo.''' | |
516 | if b'largefiles' in repo.requirements and any( |
|
516 | if b'largefiles' in repo.requirements and any( | |
517 |
shortnameslash in f[ |
|
517 | shortnameslash in f[1] for f in repo.store.datafiles() | |
518 | ): |
|
518 | ): | |
519 | return True |
|
519 | return True | |
520 |
|
520 | |||
521 | return any(openlfdirstate(repo.ui, repo, False)) |
|
521 | return any(openlfdirstate(repo.ui, repo, False)) | |
522 |
|
522 | |||
523 |
|
523 | |||
524 | class storeprotonotcapable(Exception): |
|
524 | class storeprotonotcapable(Exception): | |
525 | def __init__(self, storetypes): |
|
525 | def __init__(self, storetypes): | |
526 | self.storetypes = storetypes |
|
526 | self.storetypes = storetypes | |
527 |
|
527 | |||
528 |
|
528 | |||
529 | def getstandinsstate(repo): |
|
529 | def getstandinsstate(repo): | |
530 | standins = [] |
|
530 | standins = [] | |
531 | matcher = getstandinmatcher(repo) |
|
531 | matcher = getstandinmatcher(repo) | |
532 | wctx = repo[None] |
|
532 | wctx = repo[None] | |
533 | for standin in repo.dirstate.walk( |
|
533 | for standin in repo.dirstate.walk( | |
534 | matcher, subrepos=[], unknown=False, ignored=False |
|
534 | matcher, subrepos=[], unknown=False, ignored=False | |
535 | ): |
|
535 | ): | |
536 | lfile = splitstandin(standin) |
|
536 | lfile = splitstandin(standin) | |
537 | try: |
|
537 | try: | |
538 | hash = readasstandin(wctx[standin]) |
|
538 | hash = readasstandin(wctx[standin]) | |
539 | except IOError: |
|
539 | except IOError: | |
540 | hash = None |
|
540 | hash = None | |
541 | standins.append((lfile, hash)) |
|
541 | standins.append((lfile, hash)) | |
542 | return standins |
|
542 | return standins | |
543 |
|
543 | |||
544 |
|
544 | |||
545 | def synclfdirstate(repo, lfdirstate, lfile, normallookup): |
|
545 | def synclfdirstate(repo, lfdirstate, lfile, normallookup): | |
546 | lfstandin = standin(lfile) |
|
546 | lfstandin = standin(lfile) | |
547 | if lfstandin in repo.dirstate: |
|
547 | if lfstandin in repo.dirstate: | |
548 | stat = repo.dirstate._map[lfstandin] |
|
548 | stat = repo.dirstate._map[lfstandin] | |
549 | state, mtime = stat[0], stat[3] |
|
549 | state, mtime = stat[0], stat[3] | |
550 | else: |
|
550 | else: | |
551 | state, mtime = b'?', -1 |
|
551 | state, mtime = b'?', -1 | |
552 | if state == b'n': |
|
552 | if state == b'n': | |
553 | if normallookup or mtime < 0 or not repo.wvfs.exists(lfile): |
|
553 | if normallookup or mtime < 0 or not repo.wvfs.exists(lfile): | |
554 | # state 'n' doesn't ensure 'clean' in this case |
|
554 | # state 'n' doesn't ensure 'clean' in this case | |
555 | lfdirstate.normallookup(lfile) |
|
555 | lfdirstate.normallookup(lfile) | |
556 | else: |
|
556 | else: | |
557 | lfdirstate.normal(lfile) |
|
557 | lfdirstate.normal(lfile) | |
558 | elif state == b'm': |
|
558 | elif state == b'm': | |
559 | lfdirstate.normallookup(lfile) |
|
559 | lfdirstate.normallookup(lfile) | |
560 | elif state == b'r': |
|
560 | elif state == b'r': | |
561 | lfdirstate.remove(lfile) |
|
561 | lfdirstate.remove(lfile) | |
562 | elif state == b'a': |
|
562 | elif state == b'a': | |
563 | lfdirstate.add(lfile) |
|
563 | lfdirstate.add(lfile) | |
564 | elif state == b'?': |
|
564 | elif state == b'?': | |
565 | lfdirstate.drop(lfile) |
|
565 | lfdirstate.drop(lfile) | |
566 |
|
566 | |||
567 |
|
567 | |||
568 | def markcommitted(orig, ctx, node): |
|
568 | def markcommitted(orig, ctx, node): | |
569 | repo = ctx.repo() |
|
569 | repo = ctx.repo() | |
570 |
|
570 | |||
571 | orig(node) |
|
571 | orig(node) | |
572 |
|
572 | |||
573 | # ATTENTION: "ctx.files()" may differ from "repo[node].files()" |
|
573 | # ATTENTION: "ctx.files()" may differ from "repo[node].files()" | |
574 | # because files coming from the 2nd parent are omitted in the latter. |
|
574 | # because files coming from the 2nd parent are omitted in the latter. | |
575 | # |
|
575 | # | |
576 | # The former should be used to get targets of "synclfdirstate", |
|
576 | # The former should be used to get targets of "synclfdirstate", | |
577 | # because such files: |
|
577 | # because such files: | |
578 | # - are marked as "a" by "patch.patch()" (e.g. via transplant), and |
|
578 | # - are marked as "a" by "patch.patch()" (e.g. via transplant), and | |
579 | # - have to be marked as "n" after commit, but |
|
579 | # - have to be marked as "n" after commit, but | |
580 | # - aren't listed in "repo[node].files()" |
|
580 | # - aren't listed in "repo[node].files()" | |
581 |
|
581 | |||
582 | lfdirstate = openlfdirstate(repo.ui, repo) |
|
582 | lfdirstate = openlfdirstate(repo.ui, repo) | |
583 | for f in ctx.files(): |
|
583 | for f in ctx.files(): | |
584 | lfile = splitstandin(f) |
|
584 | lfile = splitstandin(f) | |
585 | if lfile is not None: |
|
585 | if lfile is not None: | |
586 | synclfdirstate(repo, lfdirstate, lfile, False) |
|
586 | synclfdirstate(repo, lfdirstate, lfile, False) | |
587 | lfdirstate.write() |
|
587 | lfdirstate.write() | |
588 |
|
588 | |||
589 | # As part of committing, copy all of the largefiles into the cache. |
|
589 | # As part of committing, copy all of the largefiles into the cache. | |
590 | # |
|
590 | # | |
591 | # Using "node" instead of "ctx" implies additional "repo[node]" |
|
591 | # Using "node" instead of "ctx" implies additional "repo[node]" | |
592 | # lookup while copyalltostore(), but can omit redundant check for |
|
592 | # lookup while copyalltostore(), but can omit redundant check for | |
593 | # files comming from the 2nd parent, which should exist in store |
|
593 | # files comming from the 2nd parent, which should exist in store | |
594 | # at merging. |
|
594 | # at merging. | |
595 | copyalltostore(repo, node) |
|
595 | copyalltostore(repo, node) | |
596 |
|
596 | |||
597 |
|
597 | |||
598 | def getlfilestoupdate(oldstandins, newstandins): |
|
598 | def getlfilestoupdate(oldstandins, newstandins): | |
599 | changedstandins = set(oldstandins).symmetric_difference(set(newstandins)) |
|
599 | changedstandins = set(oldstandins).symmetric_difference(set(newstandins)) | |
600 | filelist = [] |
|
600 | filelist = [] | |
601 | for f in changedstandins: |
|
601 | for f in changedstandins: | |
602 | if f[0] not in filelist: |
|
602 | if f[0] not in filelist: | |
603 | filelist.append(f[0]) |
|
603 | filelist.append(f[0]) | |
604 | return filelist |
|
604 | return filelist | |
605 |
|
605 | |||
606 |
|
606 | |||
607 | def getlfilestoupload(repo, missing, addfunc): |
|
607 | def getlfilestoupload(repo, missing, addfunc): | |
608 | makeprogress = repo.ui.makeprogress |
|
608 | makeprogress = repo.ui.makeprogress | |
609 | with makeprogress( |
|
609 | with makeprogress( | |
610 | _(b'finding outgoing largefiles'), |
|
610 | _(b'finding outgoing largefiles'), | |
611 | unit=_(b'revisions'), |
|
611 | unit=_(b'revisions'), | |
612 | total=len(missing), |
|
612 | total=len(missing), | |
613 | ) as progress: |
|
613 | ) as progress: | |
614 | for i, n in enumerate(missing): |
|
614 | for i, n in enumerate(missing): | |
615 | progress.update(i) |
|
615 | progress.update(i) | |
616 | parents = [p for p in repo[n].parents() if p != nullid] |
|
616 | parents = [p for p in repo[n].parents() if p != nullid] | |
617 |
|
617 | |||
618 | with lfstatus(repo, value=False): |
|
618 | with lfstatus(repo, value=False): | |
619 | ctx = repo[n] |
|
619 | ctx = repo[n] | |
620 |
|
620 | |||
621 | files = set(ctx.files()) |
|
621 | files = set(ctx.files()) | |
622 | if len(parents) == 2: |
|
622 | if len(parents) == 2: | |
623 | mc = ctx.manifest() |
|
623 | mc = ctx.manifest() | |
624 | mp1 = ctx.p1().manifest() |
|
624 | mp1 = ctx.p1().manifest() | |
625 | mp2 = ctx.p2().manifest() |
|
625 | mp2 = ctx.p2().manifest() | |
626 | for f in mp1: |
|
626 | for f in mp1: | |
627 | if f not in mc: |
|
627 | if f not in mc: | |
628 | files.add(f) |
|
628 | files.add(f) | |
629 | for f in mp2: |
|
629 | for f in mp2: | |
630 | if f not in mc: |
|
630 | if f not in mc: | |
631 | files.add(f) |
|
631 | files.add(f) | |
632 | for f in mc: |
|
632 | for f in mc: | |
633 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): |
|
633 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): | |
634 | files.add(f) |
|
634 | files.add(f) | |
635 | for fn in files: |
|
635 | for fn in files: | |
636 | if isstandin(fn) and fn in ctx: |
|
636 | if isstandin(fn) and fn in ctx: | |
637 | addfunc(fn, readasstandin(ctx[fn])) |
|
637 | addfunc(fn, readasstandin(ctx[fn])) | |
638 |
|
638 | |||
639 |
|
639 | |||
640 | def updatestandinsbymatch(repo, match): |
|
640 | def updatestandinsbymatch(repo, match): | |
641 | """Update standins in the working directory according to specified match |
|
641 | """Update standins in the working directory according to specified match | |
642 |
|
642 | |||
643 | This returns (possibly modified) ``match`` object to be used for |
|
643 | This returns (possibly modified) ``match`` object to be used for | |
644 | subsequent commit process. |
|
644 | subsequent commit process. | |
645 | """ |
|
645 | """ | |
646 |
|
646 | |||
647 | ui = repo.ui |
|
647 | ui = repo.ui | |
648 |
|
648 | |||
649 | # Case 1: user calls commit with no specific files or |
|
649 | # Case 1: user calls commit with no specific files or | |
650 | # include/exclude patterns: refresh and commit all files that |
|
650 | # include/exclude patterns: refresh and commit all files that | |
651 | # are "dirty". |
|
651 | # are "dirty". | |
652 | if match is None or match.always(): |
|
652 | if match is None or match.always(): | |
653 | # Spend a bit of time here to get a list of files we know |
|
653 | # Spend a bit of time here to get a list of files we know | |
654 | # are modified so we can compare only against those. |
|
654 | # are modified so we can compare only against those. | |
655 | # It can cost a lot of time (several seconds) |
|
655 | # It can cost a lot of time (several seconds) | |
656 | # otherwise to update all standins if the largefiles are |
|
656 | # otherwise to update all standins if the largefiles are | |
657 | # large. |
|
657 | # large. | |
658 | lfdirstate = openlfdirstate(ui, repo) |
|
658 | lfdirstate = openlfdirstate(ui, repo) | |
659 | dirtymatch = matchmod.always() |
|
659 | dirtymatch = matchmod.always() | |
660 | unsure, s = lfdirstate.status( |
|
660 | unsure, s = lfdirstate.status( | |
661 | dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False |
|
661 | dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False | |
662 | ) |
|
662 | ) | |
663 | modifiedfiles = unsure + s.modified + s.added + s.removed |
|
663 | modifiedfiles = unsure + s.modified + s.added + s.removed | |
664 | lfiles = listlfiles(repo) |
|
664 | lfiles = listlfiles(repo) | |
665 | # this only loops through largefiles that exist (not |
|
665 | # this only loops through largefiles that exist (not | |
666 | # removed/renamed) |
|
666 | # removed/renamed) | |
667 | for lfile in lfiles: |
|
667 | for lfile in lfiles: | |
668 | if lfile in modifiedfiles: |
|
668 | if lfile in modifiedfiles: | |
669 | fstandin = standin(lfile) |
|
669 | fstandin = standin(lfile) | |
670 | if repo.wvfs.exists(fstandin): |
|
670 | if repo.wvfs.exists(fstandin): | |
671 | # this handles the case where a rebase is being |
|
671 | # this handles the case where a rebase is being | |
672 | # performed and the working copy is not updated |
|
672 | # performed and the working copy is not updated | |
673 | # yet. |
|
673 | # yet. | |
674 | if repo.wvfs.exists(lfile): |
|
674 | if repo.wvfs.exists(lfile): | |
675 | updatestandin(repo, lfile, fstandin) |
|
675 | updatestandin(repo, lfile, fstandin) | |
676 |
|
676 | |||
677 | return match |
|
677 | return match | |
678 |
|
678 | |||
679 | lfiles = listlfiles(repo) |
|
679 | lfiles = listlfiles(repo) | |
680 | match._files = repo._subdirlfs(match.files(), lfiles) |
|
680 | match._files = repo._subdirlfs(match.files(), lfiles) | |
681 |
|
681 | |||
682 | # Case 2: user calls commit with specified patterns: refresh |
|
682 | # Case 2: user calls commit with specified patterns: refresh | |
683 | # any matching big files. |
|
683 | # any matching big files. | |
684 | smatcher = composestandinmatcher(repo, match) |
|
684 | smatcher = composestandinmatcher(repo, match) | |
685 | standins = repo.dirstate.walk( |
|
685 | standins = repo.dirstate.walk( | |
686 | smatcher, subrepos=[], unknown=False, ignored=False |
|
686 | smatcher, subrepos=[], unknown=False, ignored=False | |
687 | ) |
|
687 | ) | |
688 |
|
688 | |||
689 | # No matching big files: get out of the way and pass control to |
|
689 | # No matching big files: get out of the way and pass control to | |
690 | # the usual commit() method. |
|
690 | # the usual commit() method. | |
691 | if not standins: |
|
691 | if not standins: | |
692 | return match |
|
692 | return match | |
693 |
|
693 | |||
694 | # Refresh all matching big files. It's possible that the |
|
694 | # Refresh all matching big files. It's possible that the | |
695 | # commit will end up failing, in which case the big files will |
|
695 | # commit will end up failing, in which case the big files will | |
696 | # stay refreshed. No harm done: the user modified them and |
|
696 | # stay refreshed. No harm done: the user modified them and | |
697 | # asked to commit them, so sooner or later we're going to |
|
697 | # asked to commit them, so sooner or later we're going to | |
698 | # refresh the standins. Might as well leave them refreshed. |
|
698 | # refresh the standins. Might as well leave them refreshed. | |
699 | lfdirstate = openlfdirstate(ui, repo) |
|
699 | lfdirstate = openlfdirstate(ui, repo) | |
700 | for fstandin in standins: |
|
700 | for fstandin in standins: | |
701 | lfile = splitstandin(fstandin) |
|
701 | lfile = splitstandin(fstandin) | |
702 | if lfdirstate[lfile] != b'r': |
|
702 | if lfdirstate[lfile] != b'r': | |
703 | updatestandin(repo, lfile, fstandin) |
|
703 | updatestandin(repo, lfile, fstandin) | |
704 |
|
704 | |||
705 | # Cook up a new matcher that only matches regular files or |
|
705 | # Cook up a new matcher that only matches regular files or | |
706 | # standins corresponding to the big files requested by the |
|
706 | # standins corresponding to the big files requested by the | |
707 | # user. Have to modify _files to prevent commit() from |
|
707 | # user. Have to modify _files to prevent commit() from | |
708 | # complaining "not tracked" for big files. |
|
708 | # complaining "not tracked" for big files. | |
709 | match = copy.copy(match) |
|
709 | match = copy.copy(match) | |
710 | origmatchfn = match.matchfn |
|
710 | origmatchfn = match.matchfn | |
711 |
|
711 | |||
712 | # Check both the list of largefiles and the list of |
|
712 | # Check both the list of largefiles and the list of | |
713 | # standins because if a largefile was removed, it |
|
713 | # standins because if a largefile was removed, it | |
714 | # won't be in the list of largefiles at this point |
|
714 | # won't be in the list of largefiles at this point | |
715 | match._files += sorted(standins) |
|
715 | match._files += sorted(standins) | |
716 |
|
716 | |||
717 | actualfiles = [] |
|
717 | actualfiles = [] | |
718 | for f in match._files: |
|
718 | for f in match._files: | |
719 | fstandin = standin(f) |
|
719 | fstandin = standin(f) | |
720 |
|
720 | |||
721 | # For largefiles, only one of the normal and standin should be |
|
721 | # For largefiles, only one of the normal and standin should be | |
722 | # committed (except if one of them is a remove). In the case of a |
|
722 | # committed (except if one of them is a remove). In the case of a | |
723 | # standin removal, drop the normal file if it is unknown to dirstate. |
|
723 | # standin removal, drop the normal file if it is unknown to dirstate. | |
724 | # Thus, skip plain largefile names but keep the standin. |
|
724 | # Thus, skip plain largefile names but keep the standin. | |
725 | if f in lfiles or fstandin in standins: |
|
725 | if f in lfiles or fstandin in standins: | |
726 | if repo.dirstate[fstandin] != b'r': |
|
726 | if repo.dirstate[fstandin] != b'r': | |
727 | if repo.dirstate[f] != b'r': |
|
727 | if repo.dirstate[f] != b'r': | |
728 | continue |
|
728 | continue | |
729 | elif repo.dirstate[f] == b'?': |
|
729 | elif repo.dirstate[f] == b'?': | |
730 | continue |
|
730 | continue | |
731 |
|
731 | |||
732 | actualfiles.append(f) |
|
732 | actualfiles.append(f) | |
733 | match._files = actualfiles |
|
733 | match._files = actualfiles | |
734 |
|
734 | |||
735 | def matchfn(f): |
|
735 | def matchfn(f): | |
736 | if origmatchfn(f): |
|
736 | if origmatchfn(f): | |
737 | return f not in lfiles |
|
737 | return f not in lfiles | |
738 | else: |
|
738 | else: | |
739 | return f in standins |
|
739 | return f in standins | |
740 |
|
740 | |||
741 | match.matchfn = matchfn |
|
741 | match.matchfn = matchfn | |
742 |
|
742 | |||
743 | return match |
|
743 | return match | |
744 |
|
744 | |||
745 |
|
745 | |||
746 | class automatedcommithook(object): |
|
746 | class automatedcommithook(object): | |
747 | """Stateful hook to update standins at the 1st commit of resuming |
|
747 | """Stateful hook to update standins at the 1st commit of resuming | |
748 |
|
748 | |||
749 | For efficiency, updating standins in the working directory should |
|
749 | For efficiency, updating standins in the working directory should | |
750 | be avoided while automated committing (like rebase, transplant and |
|
750 | be avoided while automated committing (like rebase, transplant and | |
751 | so on), because they should be updated before committing. |
|
751 | so on), because they should be updated before committing. | |
752 |
|
752 | |||
753 | But the 1st commit of resuming automated committing (e.g. ``rebase |
|
753 | But the 1st commit of resuming automated committing (e.g. ``rebase | |
754 | --continue``) should update them, because largefiles may be |
|
754 | --continue``) should update them, because largefiles may be | |
755 | modified manually. |
|
755 | modified manually. | |
756 | """ |
|
756 | """ | |
757 |
|
757 | |||
758 | def __init__(self, resuming): |
|
758 | def __init__(self, resuming): | |
759 | self.resuming = resuming |
|
759 | self.resuming = resuming | |
760 |
|
760 | |||
761 | def __call__(self, repo, match): |
|
761 | def __call__(self, repo, match): | |
762 | if self.resuming: |
|
762 | if self.resuming: | |
763 | self.resuming = False # avoids updating at subsequent commits |
|
763 | self.resuming = False # avoids updating at subsequent commits | |
764 | return updatestandinsbymatch(repo, match) |
|
764 | return updatestandinsbymatch(repo, match) | |
765 | else: |
|
765 | else: | |
766 | return match |
|
766 | return match | |
767 |
|
767 | |||
768 |
|
768 | |||
769 | def getstatuswriter(ui, repo, forcibly=None): |
|
769 | def getstatuswriter(ui, repo, forcibly=None): | |
770 | """Return the function to write largefiles specific status out |
|
770 | """Return the function to write largefiles specific status out | |
771 |
|
771 | |||
772 | If ``forcibly`` is ``None``, this returns the last element of |
|
772 | If ``forcibly`` is ``None``, this returns the last element of | |
773 | ``repo._lfstatuswriters`` as "default" writer function. |
|
773 | ``repo._lfstatuswriters`` as "default" writer function. | |
774 |
|
774 | |||
775 | Otherwise, this returns the function to always write out (or |
|
775 | Otherwise, this returns the function to always write out (or | |
776 | ignore if ``not forcibly``) status. |
|
776 | ignore if ``not forcibly``) status. | |
777 | """ |
|
777 | """ | |
778 | if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'): |
|
778 | if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'): | |
779 | return repo._lfstatuswriters[-1] |
|
779 | return repo._lfstatuswriters[-1] | |
780 | else: |
|
780 | else: | |
781 | if forcibly: |
|
781 | if forcibly: | |
782 | return ui.status # forcibly WRITE OUT |
|
782 | return ui.status # forcibly WRITE OUT | |
783 | else: |
|
783 | else: | |
784 | return lambda *msg, **opts: None # forcibly IGNORE |
|
784 | return lambda *msg, **opts: None # forcibly IGNORE |
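The two helpers above are consumed through a pair of per-repository stacks, "repo._lfcommithooks" and "repo._lfstatuswriters", which reposetup() in the next file seeds with lfutil.updatestandinsbymatch and ui.status; only the last element of each stack is used. The push/pop pattern below is a minimal sketch of how an automated operation (rebase-like) is assumed to drive these stacks, not the extension's actual call sites; run_automated_commits and commitfunc are hypothetical names.

    from hgext.largefiles import lfutil

    def run_automated_commits(repo, commitfunc, resuming=False):
        # Hypothetical driver: while the automated operation runs, standins
        # are refreshed only at the first commit after resuming (see
        # automatedcommithook above), and largefiles status output is
        # silenced for callers that go through getstatuswriter().
        repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
        repo._lfstatuswriters.append(lambda *msg, **opts: None)
        try:
            commitfunc()
        finally:
            repo._lfstatuswriters.pop()
            repo._lfcommithooks.pop()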
@@ -1,456 +1,456 | |||||
1 | # Copyright 2009-2010 Gregory P. Ward |
|
1 | # Copyright 2009-2010 Gregory P. Ward | |
2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated | |
3 | # Copyright 2010-2011 Fog Creek Software |
|
3 | # Copyright 2010-2011 Fog Creek Software | |
4 | # Copyright 2010-2011 Unity Technologies |
|
4 | # Copyright 2010-2011 Unity Technologies | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | '''setup for largefiles repositories: reposetup''' |
|
9 | '''setup for largefiles repositories: reposetup''' | |
10 | from __future__ import absolute_import |
|
10 | from __future__ import absolute_import | |
11 |
|
11 | |||
12 | import copy |
|
12 | import copy | |
13 |
|
13 | |||
14 | from mercurial.i18n import _ |
|
14 | from mercurial.i18n import _ | |
15 |
|
15 | |||
16 | from mercurial import ( |
|
16 | from mercurial import ( | |
17 | error, |
|
17 | error, | |
18 | extensions, |
|
18 | extensions, | |
19 | localrepo, |
|
19 | localrepo, | |
20 | match as matchmod, |
|
20 | match as matchmod, | |
21 | scmutil, |
|
21 | scmutil, | |
22 | util, |
|
22 | util, | |
23 | ) |
|
23 | ) | |
24 |
|
24 | |||
25 | from . import ( |
|
25 | from . import ( | |
26 | lfcommands, |
|
26 | lfcommands, | |
27 | lfutil, |
|
27 | lfutil, | |
28 | ) |
|
28 | ) | |
29 |
|
29 | |||
30 |
|
30 | |||
31 | def reposetup(ui, repo): |
|
31 | def reposetup(ui, repo): | |
32 | # wire repositories should be given new wireproto functions |
|
32 | # wire repositories should be given new wireproto functions | |
33 | # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs" |
|
33 | # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs" | |
34 | if not repo.local(): |
|
34 | if not repo.local(): | |
35 | return |
|
35 | return | |
36 |
|
36 | |||
37 | class lfilesrepo(repo.__class__): |
|
37 | class lfilesrepo(repo.__class__): | |
38 | # marker used to check whether the "repo" object has largefiles enabled |
|
38 | # marker used to check whether the "repo" object has largefiles enabled | |
39 | _largefilesenabled = True |
|
39 | _largefilesenabled = True | |
40 |
|
40 | |||
41 | lfstatus = False |
|
41 | lfstatus = False | |
42 |
|
42 | |||
43 | # When lfstatus is set, return a context that gives the names |
|
43 | # When lfstatus is set, return a context that gives the names | |
44 | # of largefiles instead of their corresponding standins and |
|
44 | # of largefiles instead of their corresponding standins and | |
45 | # identifies the largefiles as always binary, regardless of |
|
45 | # identifies the largefiles as always binary, regardless of | |
46 | # their actual contents. |
|
46 | # their actual contents. | |
47 | def __getitem__(self, changeid): |
|
47 | def __getitem__(self, changeid): | |
48 | ctx = super(lfilesrepo, self).__getitem__(changeid) |
|
48 | ctx = super(lfilesrepo, self).__getitem__(changeid) | |
49 | if self.lfstatus: |
|
49 | if self.lfstatus: | |
50 |
|
50 | |||
51 | def files(orig): |
|
51 | def files(orig): | |
52 | filenames = orig() |
|
52 | filenames = orig() | |
53 | return [lfutil.splitstandin(f) or f for f in filenames] |
|
53 | return [lfutil.splitstandin(f) or f for f in filenames] | |
54 |
|
54 | |||
55 | extensions.wrapfunction(ctx, 'files', files) |
|
55 | extensions.wrapfunction(ctx, 'files', files) | |
56 |
|
56 | |||
57 | def manifest(orig): |
|
57 | def manifest(orig): | |
58 | man1 = orig() |
|
58 | man1 = orig() | |
59 |
|
59 | |||
60 | class lfilesmanifest(man1.__class__): |
|
60 | class lfilesmanifest(man1.__class__): | |
61 | def __contains__(self, filename): |
|
61 | def __contains__(self, filename): | |
62 | orig = super(lfilesmanifest, self).__contains__ |
|
62 | orig = super(lfilesmanifest, self).__contains__ | |
63 | return orig(filename) or orig( |
|
63 | return orig(filename) or orig( | |
64 | lfutil.standin(filename) |
|
64 | lfutil.standin(filename) | |
65 | ) |
|
65 | ) | |
66 |
|
66 | |||
67 | man1.__class__ = lfilesmanifest |
|
67 | man1.__class__ = lfilesmanifest | |
68 | return man1 |
|
68 | return man1 | |
69 |
|
69 | |||
70 | extensions.wrapfunction(ctx, 'manifest', manifest) |
|
70 | extensions.wrapfunction(ctx, 'manifest', manifest) | |
71 |
|
71 | |||
72 | def filectx(orig, path, fileid=None, filelog=None): |
|
72 | def filectx(orig, path, fileid=None, filelog=None): | |
73 | try: |
|
73 | try: | |
74 | if filelog is not None: |
|
74 | if filelog is not None: | |
75 | result = orig(path, fileid, filelog) |
|
75 | result = orig(path, fileid, filelog) | |
76 | else: |
|
76 | else: | |
77 | result = orig(path, fileid) |
|
77 | result = orig(path, fileid) | |
78 | except error.LookupError: |
|
78 | except error.LookupError: | |
79 | # Adding a null character will cause Mercurial to |
|
79 | # Adding a null character will cause Mercurial to | |
80 | # identify this as a binary file. |
|
80 | # identify this as a binary file. | |
81 | if filelog is not None: |
|
81 | if filelog is not None: | |
82 | result = orig(lfutil.standin(path), fileid, filelog) |
|
82 | result = orig(lfutil.standin(path), fileid, filelog) | |
83 | else: |
|
83 | else: | |
84 | result = orig(lfutil.standin(path), fileid) |
|
84 | result = orig(lfutil.standin(path), fileid) | |
85 | olddata = result.data |
|
85 | olddata = result.data | |
86 | result.data = lambda: olddata() + b'\0' |
|
86 | result.data = lambda: olddata() + b'\0' | |
87 | return result |
|
87 | return result | |
88 |
|
88 | |||
89 | extensions.wrapfunction(ctx, 'filectx', filectx) |
|
89 | extensions.wrapfunction(ctx, 'filectx', filectx) | |
90 |
|
90 | |||
91 | return ctx |
|
91 | return ctx | |
92 |
|
92 | |||
93 | # Figure out the status of big files and insert them into the |
|
93 | # Figure out the status of big files and insert them into the | |
94 | # appropriate list in the result. Also removes standin files |
|
94 | # appropriate list in the result. Also removes standin files | |
95 | # from the listing. Revert to the original status if |
|
95 | # from the listing. Revert to the original status if | |
96 | # self.lfstatus is False. |
|
96 | # self.lfstatus is False. | |
97 | # XXX large file status is buggy when used on repo proxy. |
|
97 | # XXX large file status is buggy when used on repo proxy. | |
98 | # XXX this needs to be investigated. |
|
98 | # XXX this needs to be investigated. | |
99 | @localrepo.unfilteredmethod |
|
99 | @localrepo.unfilteredmethod | |
100 | def status( |
|
100 | def status( | |
101 | self, |
|
101 | self, | |
102 | node1=b'.', |
|
102 | node1=b'.', | |
103 | node2=None, |
|
103 | node2=None, | |
104 | match=None, |
|
104 | match=None, | |
105 | ignored=False, |
|
105 | ignored=False, | |
106 | clean=False, |
|
106 | clean=False, | |
107 | unknown=False, |
|
107 | unknown=False, | |
108 | listsubrepos=False, |
|
108 | listsubrepos=False, | |
109 | ): |
|
109 | ): | |
110 | listignored, listclean, listunknown = ignored, clean, unknown |
|
110 | listignored, listclean, listunknown = ignored, clean, unknown | |
111 | orig = super(lfilesrepo, self).status |
|
111 | orig = super(lfilesrepo, self).status | |
112 | if not self.lfstatus: |
|
112 | if not self.lfstatus: | |
113 | return orig( |
|
113 | return orig( | |
114 | node1, |
|
114 | node1, | |
115 | node2, |
|
115 | node2, | |
116 | match, |
|
116 | match, | |
117 | listignored, |
|
117 | listignored, | |
118 | listclean, |
|
118 | listclean, | |
119 | listunknown, |
|
119 | listunknown, | |
120 | listsubrepos, |
|
120 | listsubrepos, | |
121 | ) |
|
121 | ) | |
122 |
|
122 | |||
123 | # some calls in this function rely on the old version of status |
|
123 | # some calls in this function rely on the old version of status | |
124 | self.lfstatus = False |
|
124 | self.lfstatus = False | |
125 | ctx1 = self[node1] |
|
125 | ctx1 = self[node1] | |
126 | ctx2 = self[node2] |
|
126 | ctx2 = self[node2] | |
127 | working = ctx2.rev() is None |
|
127 | working = ctx2.rev() is None | |
128 | parentworking = working and ctx1 == self[b'.'] |
|
128 | parentworking = working and ctx1 == self[b'.'] | |
129 |
|
129 | |||
130 | if match is None: |
|
130 | if match is None: | |
131 | match = matchmod.always() |
|
131 | match = matchmod.always() | |
132 |
|
132 | |||
133 | try: |
|
133 | try: | |
134 | # updating the dirstate is optional |
|
134 | # updating the dirstate is optional | |
135 | # so we don't wait on the lock |
|
135 | # so we don't wait on the lock | |
136 | wlock = self.wlock(False) |
|
136 | wlock = self.wlock(False) | |
137 | gotlock = True |
|
137 | gotlock = True | |
138 | except error.LockError: |
|
138 | except error.LockError: | |
139 | wlock = util.nullcontextmanager() |
|
139 | wlock = util.nullcontextmanager() | |
140 | gotlock = False |
|
140 | gotlock = False | |
141 | with wlock: |
|
141 | with wlock: | |
142 |
|
142 | |||
143 | # First check if paths or patterns were specified on the |
|
143 | # First check if paths or patterns were specified on the | |
144 | # command line. If there were, and they don't match any |
|
144 | # command line. If there were, and they don't match any | |
145 | # largefiles, we should just bail here and let super |
|
145 | # largefiles, we should just bail here and let super | |
146 | # handle it -- thus gaining a big performance boost. |
|
146 | # handle it -- thus gaining a big performance boost. | |
147 | lfdirstate = lfutil.openlfdirstate(ui, self) |
|
147 | lfdirstate = lfutil.openlfdirstate(ui, self) | |
148 | if not match.always(): |
|
148 | if not match.always(): | |
149 | for f in lfdirstate: |
|
149 | for f in lfdirstate: | |
150 | if match(f): |
|
150 | if match(f): | |
151 | break |
|
151 | break | |
152 | else: |
|
152 | else: | |
153 | return orig( |
|
153 | return orig( | |
154 | node1, |
|
154 | node1, | |
155 | node2, |
|
155 | node2, | |
156 | match, |
|
156 | match, | |
157 | listignored, |
|
157 | listignored, | |
158 | listclean, |
|
158 | listclean, | |
159 | listunknown, |
|
159 | listunknown, | |
160 | listsubrepos, |
|
160 | listsubrepos, | |
161 | ) |
|
161 | ) | |
162 |
|
162 | |||
163 | # Create a copy of match that matches standins instead |
|
163 | # Create a copy of match that matches standins instead | |
164 | # of largefiles. |
|
164 | # of largefiles. | |
165 | def tostandins(files): |
|
165 | def tostandins(files): | |
166 | if not working: |
|
166 | if not working: | |
167 | return files |
|
167 | return files | |
168 | newfiles = [] |
|
168 | newfiles = [] | |
169 | dirstate = self.dirstate |
|
169 | dirstate = self.dirstate | |
170 | for f in files: |
|
170 | for f in files: | |
171 | sf = lfutil.standin(f) |
|
171 | sf = lfutil.standin(f) | |
172 | if sf in dirstate: |
|
172 | if sf in dirstate: | |
173 | newfiles.append(sf) |
|
173 | newfiles.append(sf) | |
174 | elif dirstate.hasdir(sf): |
|
174 | elif dirstate.hasdir(sf): | |
175 | # Directory entries could be regular or |
|
175 | # Directory entries could be regular or | |
176 | # standin, check both |
|
176 | # standin, check both | |
177 | newfiles.extend((f, sf)) |
|
177 | newfiles.extend((f, sf)) | |
178 | else: |
|
178 | else: | |
179 | newfiles.append(f) |
|
179 | newfiles.append(f) | |
180 | return newfiles |
|
180 | return newfiles | |
181 |
|
181 | |||
182 | m = copy.copy(match) |
|
182 | m = copy.copy(match) | |
183 | m._files = tostandins(m._files) |
|
183 | m._files = tostandins(m._files) | |
184 |
|
184 | |||
185 | result = orig( |
|
185 | result = orig( | |
186 | node1, node2, m, ignored, clean, unknown, listsubrepos |
|
186 | node1, node2, m, ignored, clean, unknown, listsubrepos | |
187 | ) |
|
187 | ) | |
188 | if working: |
|
188 | if working: | |
189 |
|
189 | |||
190 | def sfindirstate(f): |
|
190 | def sfindirstate(f): | |
191 | sf = lfutil.standin(f) |
|
191 | sf = lfutil.standin(f) | |
192 | dirstate = self.dirstate |
|
192 | dirstate = self.dirstate | |
193 | return sf in dirstate or dirstate.hasdir(sf) |
|
193 | return sf in dirstate or dirstate.hasdir(sf) | |
194 |
|
194 | |||
195 | match._files = [f for f in match._files if sfindirstate(f)] |
|
195 | match._files = [f for f in match._files if sfindirstate(f)] | |
196 | # Don't waste time getting the ignored and unknown |
|
196 | # Don't waste time getting the ignored and unknown | |
197 | # files from lfdirstate |
|
197 | # files from lfdirstate | |
198 | unsure, s = lfdirstate.status( |
|
198 | unsure, s = lfdirstate.status( | |
199 | match, |
|
199 | match, | |
200 | subrepos=[], |
|
200 | subrepos=[], | |
201 | ignored=False, |
|
201 | ignored=False, | |
202 | clean=listclean, |
|
202 | clean=listclean, | |
203 | unknown=False, |
|
203 | unknown=False, | |
204 | ) |
|
204 | ) | |
205 | (modified, added, removed, deleted, clean) = ( |
|
205 | (modified, added, removed, deleted, clean) = ( | |
206 | s.modified, |
|
206 | s.modified, | |
207 | s.added, |
|
207 | s.added, | |
208 | s.removed, |
|
208 | s.removed, | |
209 | s.deleted, |
|
209 | s.deleted, | |
210 | s.clean, |
|
210 | s.clean, | |
211 | ) |
|
211 | ) | |
212 | if parentworking: |
|
212 | if parentworking: | |
213 | for lfile in unsure: |
|
213 | for lfile in unsure: | |
214 | standin = lfutil.standin(lfile) |
|
214 | standin = lfutil.standin(lfile) | |
215 | if standin not in ctx1: |
|
215 | if standin not in ctx1: | |
216 | # from second parent |
|
216 | # from second parent | |
217 | modified.append(lfile) |
|
217 | modified.append(lfile) | |
218 | elif lfutil.readasstandin( |
|
218 | elif lfutil.readasstandin( | |
219 | ctx1[standin] |
|
219 | ctx1[standin] | |
220 | ) != lfutil.hashfile(self.wjoin(lfile)): |
|
220 | ) != lfutil.hashfile(self.wjoin(lfile)): | |
221 | modified.append(lfile) |
|
221 | modified.append(lfile) | |
222 | else: |
|
222 | else: | |
223 | if listclean: |
|
223 | if listclean: | |
224 | clean.append(lfile) |
|
224 | clean.append(lfile) | |
225 | lfdirstate.normal(lfile) |
|
225 | lfdirstate.normal(lfile) | |
226 | else: |
|
226 | else: | |
227 | tocheck = unsure + modified + added + clean |
|
227 | tocheck = unsure + modified + added + clean | |
228 | modified, added, clean = [], [], [] |
|
228 | modified, added, clean = [], [], [] | |
229 | checkexec = self.dirstate._checkexec |
|
229 | checkexec = self.dirstate._checkexec | |
230 |
|
230 | |||
231 | for lfile in tocheck: |
|
231 | for lfile in tocheck: | |
232 | standin = lfutil.standin(lfile) |
|
232 | standin = lfutil.standin(lfile) | |
233 | if standin in ctx1: |
|
233 | if standin in ctx1: | |
234 | abslfile = self.wjoin(lfile) |
|
234 | abslfile = self.wjoin(lfile) | |
235 | if ( |
|
235 | if ( | |
236 | lfutil.readasstandin(ctx1[standin]) |
|
236 | lfutil.readasstandin(ctx1[standin]) | |
237 | != lfutil.hashfile(abslfile) |
|
237 | != lfutil.hashfile(abslfile) | |
238 | ) or ( |
|
238 | ) or ( | |
239 | checkexec |
|
239 | checkexec | |
240 | and (b'x' in ctx1.flags(standin)) |
|
240 | and (b'x' in ctx1.flags(standin)) | |
241 | != bool(lfutil.getexecutable(abslfile)) |
|
241 | != bool(lfutil.getexecutable(abslfile)) | |
242 | ): |
|
242 | ): | |
243 | modified.append(lfile) |
|
243 | modified.append(lfile) | |
244 | elif listclean: |
|
244 | elif listclean: | |
245 | clean.append(lfile) |
|
245 | clean.append(lfile) | |
246 | else: |
|
246 | else: | |
247 | added.append(lfile) |
|
247 | added.append(lfile) | |
248 |
|
248 | |||
249 | # at this point, 'removed' contains largefiles |
|
249 | # at this point, 'removed' contains largefiles | |
250 | # marked as 'R' in the working context. |
|
250 | # marked as 'R' in the working context. | |
251 | # largefiles that are not managed in the target |
|
251 | # largefiles that are not managed in the target | |
252 | # context should then be excluded from 'removed'. |
|
252 | # context should then be excluded from 'removed'. | |
253 | removed = [ |
|
253 | removed = [ | |
254 | lfile |
|
254 | lfile | |
255 | for lfile in removed |
|
255 | for lfile in removed | |
256 | if lfutil.standin(lfile) in ctx1 |
|
256 | if lfutil.standin(lfile) in ctx1 | |
257 | ] |
|
257 | ] | |
258 |
|
258 | |||
259 | # Standins no longer found in lfdirstate have been deleted |
|
259 | # Standins no longer found in lfdirstate have been deleted | |
260 | for standin in ctx1.walk(lfutil.getstandinmatcher(self)): |
|
260 | for standin in ctx1.walk(lfutil.getstandinmatcher(self)): | |
261 | lfile = lfutil.splitstandin(standin) |
|
261 | lfile = lfutil.splitstandin(standin) | |
262 | if not match(lfile): |
|
262 | if not match(lfile): | |
263 | continue |
|
263 | continue | |
264 | if lfile not in lfdirstate: |
|
264 | if lfile not in lfdirstate: | |
265 | deleted.append(lfile) |
|
265 | deleted.append(lfile) | |
266 | # Sync "largefile has been removed" back to the |
|
266 | # Sync "largefile has been removed" back to the | |
267 | # standin. Removing a file as a side effect of |
|
267 | # standin. Removing a file as a side effect of | |
268 | # running status is gross, but the alternatives (if |
|
268 | # running status is gross, but the alternatives (if | |
269 | # any) are worse. |
|
269 | # any) are worse. | |
270 | self.wvfs.unlinkpath(standin, ignoremissing=True) |
|
270 | self.wvfs.unlinkpath(standin, ignoremissing=True) | |
271 |
|
271 | |||
272 | # Filter result lists |
|
272 | # Filter result lists | |
273 | result = list(result) |
|
273 | result = list(result) | |
274 |
|
274 | |||
275 | # Largefiles are not really removed when they're |
|
275 | # Largefiles are not really removed when they're | |
276 | # still in the normal dirstate. Likewise, normal |
|
276 | # still in the normal dirstate. Likewise, normal | |
277 | # files are not really removed if they are still in |
|
277 | # files are not really removed if they are still in | |
278 | # lfdirstate. This happens in merges where files |
|
278 | # lfdirstate. This happens in merges where files | |
279 | # change type. |
|
279 | # change type. | |
280 | removed = [f for f in removed if f not in self.dirstate] |
|
280 | removed = [f for f in removed if f not in self.dirstate] | |
281 | result[2] = [f for f in result[2] if f not in lfdirstate] |
|
281 | result[2] = [f for f in result[2] if f not in lfdirstate] | |
282 |
|
282 | |||
283 | lfiles = set(lfdirstate) |
|
283 | lfiles = set(lfdirstate) | |
284 | # Unknown files |
|
284 | # Unknown files | |
285 | result[4] = set(result[4]).difference(lfiles) |
|
285 | result[4] = set(result[4]).difference(lfiles) | |
286 | # Ignored files |
|
286 | # Ignored files | |
287 | result[5] = set(result[5]).difference(lfiles) |
|
287 | result[5] = set(result[5]).difference(lfiles) | |
288 | # combine normal files and largefiles |
|
288 | # combine normal files and largefiles | |
289 | normals = [ |
|
289 | normals = [ | |
290 | [fn for fn in filelist if not lfutil.isstandin(fn)] |
|
290 | [fn for fn in filelist if not lfutil.isstandin(fn)] | |
291 | for filelist in result |
|
291 | for filelist in result | |
292 | ] |
|
292 | ] | |
293 | lfstatus = ( |
|
293 | lfstatus = ( | |
294 | modified, |
|
294 | modified, | |
295 | added, |
|
295 | added, | |
296 | removed, |
|
296 | removed, | |
297 | deleted, |
|
297 | deleted, | |
298 | [], |
|
298 | [], | |
299 | [], |
|
299 | [], | |
300 | clean, |
|
300 | clean, | |
301 | ) |
|
301 | ) | |
302 | result = [ |
|
302 | result = [ | |
303 | sorted(list1 + list2) |
|
303 | sorted(list1 + list2) | |
304 | for (list1, list2) in zip(normals, lfstatus) |
|
304 | for (list1, list2) in zip(normals, lfstatus) | |
305 | ] |
|
305 | ] | |
306 | else: # not against working directory |
|
306 | else: # not against working directory | |
307 | result = [ |
|
307 | result = [ | |
308 | [lfutil.splitstandin(f) or f for f in items] |
|
308 | [lfutil.splitstandin(f) or f for f in items] | |
309 | for items in result |
|
309 | for items in result | |
310 | ] |
|
310 | ] | |
311 |
|
311 | |||
312 | if gotlock: |
|
312 | if gotlock: | |
313 | lfdirstate.write() |
|
313 | lfdirstate.write() | |
314 |
|
314 | |||
315 | self.lfstatus = True |
|
315 | self.lfstatus = True | |
316 | return scmutil.status(*result) |
|
316 | return scmutil.status(*result) | |
317 |
|
317 | |||
318 | def commitctx(self, ctx, *args, **kwargs): |
|
318 | def commitctx(self, ctx, *args, **kwargs): | |
319 | node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs) |
|
319 | node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs) | |
320 |
|
320 | |||
321 | class lfilesctx(ctx.__class__): |
|
321 | class lfilesctx(ctx.__class__): | |
322 | def markcommitted(self, node): |
|
322 | def markcommitted(self, node): | |
323 | orig = super(lfilesctx, self).markcommitted |
|
323 | orig = super(lfilesctx, self).markcommitted | |
324 | return lfutil.markcommitted(orig, self, node) |
|
324 | return lfutil.markcommitted(orig, self, node) | |
325 |
|
325 | |||
326 | ctx.__class__ = lfilesctx |
|
326 | ctx.__class__ = lfilesctx | |
327 | return node |
|
327 | return node | |
328 |
|
328 | |||
329 | # Before commit, largefile standins have not had their |
|
329 | # Before commit, largefile standins have not had their | |
330 | # contents updated to reflect the hash of their largefile. |
|
330 | # contents updated to reflect the hash of their largefile. | |
331 | # Do that here. |
|
331 | # Do that here. | |
332 | def commit( |
|
332 | def commit( | |
333 | self, |
|
333 | self, | |
334 | text=b"", |
|
334 | text=b"", | |
335 | user=None, |
|
335 | user=None, | |
336 | date=None, |
|
336 | date=None, | |
337 | match=None, |
|
337 | match=None, | |
338 | force=False, |
|
338 | force=False, | |
339 | editor=False, |
|
339 | editor=False, | |
340 | extra=None, |
|
340 | extra=None, | |
341 | ): |
|
341 | ): | |
342 | if extra is None: |
|
342 | if extra is None: | |
343 | extra = {} |
|
343 | extra = {} | |
344 | orig = super(lfilesrepo, self).commit |
|
344 | orig = super(lfilesrepo, self).commit | |
345 |
|
345 | |||
346 | with self.wlock(): |
|
346 | with self.wlock(): | |
347 | lfcommithook = self._lfcommithooks[-1] |
|
347 | lfcommithook = self._lfcommithooks[-1] | |
348 | match = lfcommithook(self, match) |
|
348 | match = lfcommithook(self, match) | |
349 | result = orig( |
|
349 | result = orig( | |
350 | text=text, |
|
350 | text=text, | |
351 | user=user, |
|
351 | user=user, | |
352 | date=date, |
|
352 | date=date, | |
353 | match=match, |
|
353 | match=match, | |
354 | force=force, |
|
354 | force=force, | |
355 | editor=editor, |
|
355 | editor=editor, | |
356 | extra=extra, |
|
356 | extra=extra, | |
357 | ) |
|
357 | ) | |
358 | return result |
|
358 | return result | |
359 |
|
359 | |||
360 | # TODO: _subdirlfs should be moved into "lfutil.py", because |
|
360 | # TODO: _subdirlfs should be moved into "lfutil.py", because | |
361 | # it is only referenced from "lfutil.updatestandinsbymatch" |
|
361 | # it is only referenced from "lfutil.updatestandinsbymatch" | |
362 | def _subdirlfs(self, files, lfiles): |
|
362 | def _subdirlfs(self, files, lfiles): | |
363 | """ |
|
363 | """ | |
364 | Adjust matched file list |
|
364 | Adjust matched file list | |
365 | If we pass a directory to commit whose only committable files |
|
365 | If we pass a directory to commit whose only committable files | |
366 | are largefiles, the core commit code aborts before finding |
|
366 | are largefiles, the core commit code aborts before finding | |
367 | the largefiles. |
|
367 | the largefiles. | |
368 | So we do the following: |
|
368 | So we do the following: | |
369 | For directories that only have largefiles as matches, |
|
369 | For directories that only have largefiles as matches, | |
370 | we explicitly add the largefiles to the match list and remove |
|
370 | we explicitly add the largefiles to the match list and remove | |
371 | the directory. |
|
371 | the directory. | |
372 | In other cases, we leave the match list unmodified. |
|
372 | In other cases, we leave the match list unmodified. | |
373 | """ |
|
373 | """ | |
374 | actualfiles = [] |
|
374 | actualfiles = [] | |
375 | dirs = [] |
|
375 | dirs = [] | |
376 | regulars = [] |
|
376 | regulars = [] | |
377 |
|
377 | |||
378 | for f in files: |
|
378 | for f in files: | |
379 | if lfutil.isstandin(f + b'/'): |
|
379 | if lfutil.isstandin(f + b'/'): | |
380 | raise error.Abort( |
|
380 | raise error.Abort( | |
381 | _(b'file "%s" is a largefile standin') % f, |
|
381 | _(b'file "%s" is a largefile standin') % f, | |
382 | hint=b'commit the largefile itself instead', |
|
382 | hint=b'commit the largefile itself instead', | |
383 | ) |
|
383 | ) | |
384 | # Scan directories |
|
384 | # Scan directories | |
385 | if self.wvfs.isdir(f): |
|
385 | if self.wvfs.isdir(f): | |
386 | dirs.append(f) |
|
386 | dirs.append(f) | |
387 | else: |
|
387 | else: | |
388 | regulars.append(f) |
|
388 | regulars.append(f) | |
389 |
|
389 | |||
390 | for f in dirs: |
|
390 | for f in dirs: | |
391 | matcheddir = False |
|
391 | matcheddir = False | |
392 | d = self.dirstate.normalize(f) + b'/' |
|
392 | d = self.dirstate.normalize(f) + b'/' | |
393 | # Check for matched normal files |
|
393 | # Check for matched normal files | |
394 | for mf in regulars: |
|
394 | for mf in regulars: | |
395 | if self.dirstate.normalize(mf).startswith(d): |
|
395 | if self.dirstate.normalize(mf).startswith(d): | |
396 | actualfiles.append(f) |
|
396 | actualfiles.append(f) | |
397 | matcheddir = True |
|
397 | matcheddir = True | |
398 | break |
|
398 | break | |
399 | if not matcheddir: |
|
399 | if not matcheddir: | |
400 | # If no normal match, manually append |
|
400 | # If no normal match, manually append | |
401 | # any matching largefiles |
|
401 | # any matching largefiles | |
402 | for lf in lfiles: |
|
402 | for lf in lfiles: | |
403 | if self.dirstate.normalize(lf).startswith(d): |
|
403 | if self.dirstate.normalize(lf).startswith(d): | |
404 | actualfiles.append(lf) |
|
404 | actualfiles.append(lf) | |
405 | if not matcheddir: |
|
405 | if not matcheddir: | |
406 | # There may still be normal files in the dir, so |
|
406 | # There may still be normal files in the dir, so | |
407 | # add a directory to the list, which |
|
407 | # add a directory to the list, which | |
408 | # forces status/dirstate to walk all files and |
|
408 | # forces status/dirstate to walk all files and | |
409 | # call the match function on the matcher, even |
|
409 | # call the match function on the matcher, even | |
410 | # on case sensitive filesystems. |
|
410 | # on case sensitive filesystems. | |
411 | actualfiles.append(b'.') |
|
411 | actualfiles.append(b'.') | |
412 | matcheddir = True |
|
412 | matcheddir = True | |
413 | # Nothing in dir, so re-add it |
|
413 | # Nothing in dir, so re-add it | |
414 | # and let commit reject it |
|
414 | # and let commit reject it | |
415 | if not matcheddir: |
|
415 | if not matcheddir: | |
416 | actualfiles.append(f) |
|
416 | actualfiles.append(f) | |
417 |
|
417 | |||
418 | # Always add normal files |
|
418 | # Always add normal files | |
419 | actualfiles += regulars |
|
419 | actualfiles += regulars | |
420 | return actualfiles |
|
420 | return actualfiles | |
421 |
|
421 | |||
422 | repo.__class__ = lfilesrepo |
|
422 | repo.__class__ = lfilesrepo | |
423 |
|
423 | |||
424 | # stack of hooks being executed before committing. |
|
424 | # stack of hooks being executed before committing. | |
425 | # only last element ("_lfcommithooks[-1]") is used for each committing. |
|
425 | # only last element ("_lfcommithooks[-1]") is used for each committing. | |
426 | repo._lfcommithooks = [lfutil.updatestandinsbymatch] |
|
426 | repo._lfcommithooks = [lfutil.updatestandinsbymatch] | |
427 |
|
427 | |||
428 | # Stack of status writer functions taking "*msg, **opts" arguments |
|
428 | # Stack of status writer functions taking "*msg, **opts" arguments | |
429 | # like "ui.status()". Only last element ("_lfstatuswriters[-1]") |
|
429 | # like "ui.status()". Only last element ("_lfstatuswriters[-1]") | |
430 | # is used to write status out. |
|
430 | # is used to write status out. | |
431 | repo._lfstatuswriters = [ui.status] |
|
431 | repo._lfstatuswriters = [ui.status] | |
432 |
|
432 | |||
433 | def prepushoutgoinghook(pushop): |
|
433 | def prepushoutgoinghook(pushop): | |
434 | """Push largefiles for pushop before pushing revisions.""" |
|
434 | """Push largefiles for pushop before pushing revisions.""" | |
435 | lfrevs = pushop.lfrevs |
|
435 | lfrevs = pushop.lfrevs | |
436 | if lfrevs is None: |
|
436 | if lfrevs is None: | |
437 | lfrevs = pushop.outgoing.missing |
|
437 | lfrevs = pushop.outgoing.missing | |
438 | if lfrevs: |
|
438 | if lfrevs: | |
439 | toupload = set() |
|
439 | toupload = set() | |
440 | addfunc = lambda fn, lfhash: toupload.add(lfhash) |
|
440 | addfunc = lambda fn, lfhash: toupload.add(lfhash) | |
441 | lfutil.getlfilestoupload(pushop.repo, lfrevs, addfunc) |
|
441 | lfutil.getlfilestoupload(pushop.repo, lfrevs, addfunc) | |
442 | lfcommands.uploadlfiles(ui, pushop.repo, pushop.remote, toupload) |
|
442 | lfcommands.uploadlfiles(ui, pushop.repo, pushop.remote, toupload) | |
443 |
|
443 | |||
444 | repo.prepushoutgoinghooks.add(b"largefiles", prepushoutgoinghook) |
|
444 | repo.prepushoutgoinghooks.add(b"largefiles", prepushoutgoinghook) | |
445 |
|
445 | |||
446 | def checkrequireslfiles(ui, repo, **kwargs): |
|
446 | def checkrequireslfiles(ui, repo, **kwargs): | |
447 | if b'largefiles' not in repo.requirements and any( |
|
447 | if b'largefiles' not in repo.requirements and any( | |
448 | lfutil.shortname + b'/' in f[0] for f in repo.store.datafiles() |
|
448 | lfutil.shortname + b'/' in f[1] for f in repo.store.datafiles() | |
449 | ): |
|
449 | ): | |
450 | repo.requirements.add(b'largefiles') |
|
450 | repo.requirements.add(b'largefiles') | |
451 | scmutil.writereporequirements(repo) |
|
451 | scmutil.writereporequirements(repo) | |
452 |
|
452 | |||
453 | ui.setconfig( |
|
453 | ui.setconfig( | |
454 | b'hooks', b'changegroup.lfiles', checkrequireslfiles, b'largefiles' |
|
454 | b'hooks', b'changegroup.lfiles', checkrequireslfiles, b'largefiles' | |
455 | ) |
|
455 | ) | |
456 | ui.setconfig(b'hooks', b'commit.lfiles', checkrequireslfiles, b'largefiles') |
|
456 | ui.setconfig(b'hooks', b'commit.lfiles', checkrequireslfiles, b'largefiles') |
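For orientation, the effect of the status() override above can be seen by toggling the "lfstatus" flag that lfilesrepo defines: with the flag off, largefiles appear in status results as their ".hglf/..." standins; with it on, the override removes the standins from the listing and folds the largefiles themselves into the same modified/added/removed/deleted/clean lists. A minimal sketch, assuming a local repository with the largefiles extension enabled; the repository path is a hypothetical placeholder.

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/largefiles-repo')  # hypothetical path

    # Default behaviour: standins under '.hglf/' show up in the results.
    plain = repo.status()

    # With lfstatus set, the wrapped status() reports largefile names
    # instead and drops the standins from the listing.
    repo.lfstatus = True
    try:
        withlf = repo.status()
    finally:
        repo.lfstatus = False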
@@ -1,677 +1,677 | |||||
1 | # narrowcommands.py - command modifications for narrowhg extension |
|
1 | # narrowcommands.py - command modifications for narrowhg extension | |
2 | # |
|
2 | # | |
3 | # Copyright 2017 Google, Inc. |
|
3 | # Copyright 2017 Google, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import itertools |
|
9 | import itertools | |
10 | import os |
|
10 | import os | |
11 |
|
11 | |||
12 | from mercurial.i18n import _ |
|
12 | from mercurial.i18n import _ | |
13 | from mercurial.node import ( |
|
13 | from mercurial.node import ( | |
14 | hex, |
|
14 | hex, | |
15 | nullid, |
|
15 | nullid, | |
16 | short, |
|
16 | short, | |
17 | ) |
|
17 | ) | |
18 | from mercurial import ( |
|
18 | from mercurial import ( | |
19 | bundle2, |
|
19 | bundle2, | |
20 | cmdutil, |
|
20 | cmdutil, | |
21 | commands, |
|
21 | commands, | |
22 | discovery, |
|
22 | discovery, | |
23 | encoding, |
|
23 | encoding, | |
24 | error, |
|
24 | error, | |
25 | exchange, |
|
25 | exchange, | |
26 | extensions, |
|
26 | extensions, | |
27 | hg, |
|
27 | hg, | |
28 | narrowspec, |
|
28 | narrowspec, | |
29 | pathutil, |
|
29 | pathutil, | |
30 | pycompat, |
|
30 | pycompat, | |
31 | registrar, |
|
31 | registrar, | |
32 | repair, |
|
32 | repair, | |
33 | repoview, |
|
33 | repoview, | |
34 | requirements, |
|
34 | requirements, | |
35 | sparse, |
|
35 | sparse, | |
36 | util, |
|
36 | util, | |
37 | wireprototypes, |
|
37 | wireprototypes, | |
38 | ) |
|
38 | ) | |
39 |
|
39 | |||
40 | table = {} |
|
40 | table = {} | |
41 | command = registrar.command(table) |
|
41 | command = registrar.command(table) | |
42 |
|
42 | |||
43 |
|
43 | |||
44 | def setup(): |
|
44 | def setup(): | |
45 | """Wraps user-facing mercurial commands with narrow-aware versions.""" |
|
45 | """Wraps user-facing mercurial commands with narrow-aware versions.""" | |
46 |
|
46 | |||
47 | entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd) |
|
47 | entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd) | |
48 | entry[1].append( |
|
48 | entry[1].append( | |
49 | (b'', b'narrow', None, _(b"create a narrow clone of select files")) |
|
49 | (b'', b'narrow', None, _(b"create a narrow clone of select files")) | |
50 | ) |
|
50 | ) | |
51 | entry[1].append( |
|
51 | entry[1].append( | |
52 | ( |
|
52 | ( | |
53 | b'', |
|
53 | b'', | |
54 | b'depth', |
|
54 | b'depth', | |
55 | b'', |
|
55 | b'', | |
56 | _(b"limit the history fetched by distance from heads"), |
|
56 | _(b"limit the history fetched by distance from heads"), | |
57 | ) |
|
57 | ) | |
58 | ) |
|
58 | ) | |
59 | entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file"))) |
|
59 | entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file"))) | |
60 | # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit |
|
60 | # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit | |
61 | if b'sparse' not in extensions.enabled(): |
|
61 | if b'sparse' not in extensions.enabled(): | |
62 | entry[1].append( |
|
62 | entry[1].append( | |
63 | (b'', b'include', [], _(b"specifically fetch this file/directory")) |
|
63 | (b'', b'include', [], _(b"specifically fetch this file/directory")) | |
64 | ) |
|
64 | ) | |
65 | entry[1].append( |
|
65 | entry[1].append( | |
66 | ( |
|
66 | ( | |
67 | b'', |
|
67 | b'', | |
68 | b'exclude', |
|
68 | b'exclude', | |
69 | [], |
|
69 | [], | |
70 | _(b"do not fetch this file/directory, even if included"), |
|
70 | _(b"do not fetch this file/directory, even if included"), | |
71 | ) |
|
71 | ) | |
72 | ) |
|
72 | ) | |
73 |
|
73 | |||
74 | entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd) |
|
74 | entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd) | |
75 | entry[1].append( |
|
75 | entry[1].append( | |
76 | ( |
|
76 | ( | |
77 | b'', |
|
77 | b'', | |
78 | b'depth', |
|
78 | b'depth', | |
79 | b'', |
|
79 | b'', | |
80 | _(b"limit the history fetched by distance from heads"), |
|
80 | _(b"limit the history fetched by distance from heads"), | |
81 | ) |
|
81 | ) | |
82 | ) |
|
82 | ) | |
83 |
|
83 | |||
84 | extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd) |
|
84 | extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd) | |
85 |
|
85 | |||
86 |
|
86 | |||
87 | def clonenarrowcmd(orig, ui, repo, *args, **opts): |
|
87 | def clonenarrowcmd(orig, ui, repo, *args, **opts): | |
88 | """Wraps clone command, so 'hg clone' first wraps localrepo.clone().""" |
|
88 | """Wraps clone command, so 'hg clone' first wraps localrepo.clone().""" | |
89 | opts = pycompat.byteskwargs(opts) |
|
89 | opts = pycompat.byteskwargs(opts) | |
90 | wrappedextraprepare = util.nullcontextmanager() |
|
90 | wrappedextraprepare = util.nullcontextmanager() | |
91 | narrowspecfile = opts[b'narrowspec'] |
|
91 | narrowspecfile = opts[b'narrowspec'] | |
92 |
|
92 | |||
93 | if narrowspecfile: |
|
93 | if narrowspecfile: | |
94 | filepath = os.path.join(encoding.getcwd(), narrowspecfile) |
|
94 | filepath = os.path.join(encoding.getcwd(), narrowspecfile) | |
95 | ui.status(_(b"reading narrowspec from '%s'\n") % filepath) |
|
95 | ui.status(_(b"reading narrowspec from '%s'\n") % filepath) | |
96 | try: |
|
96 | try: | |
97 | fdata = util.readfile(filepath) |
|
97 | fdata = util.readfile(filepath) | |
98 | except IOError as inst: |
|
98 | except IOError as inst: | |
99 | raise error.Abort( |
|
99 | raise error.Abort( | |
100 | _(b"cannot read narrowspecs from '%s': %s") |
|
100 | _(b"cannot read narrowspecs from '%s': %s") | |
101 | % (filepath, encoding.strtolocal(inst.strerror)) |
|
101 | % (filepath, encoding.strtolocal(inst.strerror)) | |
102 | ) |
|
102 | ) | |
103 |
|
103 | |||
104 | includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow') |
|
104 | includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow') | |
105 | if profiles: |
|
105 | if profiles: | |
106 | raise error.ConfigError( |
|
106 | raise error.ConfigError( | |
107 | _( |
|
107 | _( | |
108 | b"cannot specify other files using '%include' in" |
|
108 | b"cannot specify other files using '%include' in" | |
109 | b" narrowspec" |
|
109 | b" narrowspec" | |
110 | ) |
|
110 | ) | |
111 | ) |
|
111 | ) | |
112 |
|
112 | |||
113 | narrowspec.validatepatterns(includes) |
|
113 | narrowspec.validatepatterns(includes) | |
114 | narrowspec.validatepatterns(excludes) |
|
114 | narrowspec.validatepatterns(excludes) | |
115 |
|
115 | |||
116 | # a narrowspec was passed, so assume that the user wants a narrow clone |
|
116 | # a narrowspec was passed, so assume that the user wants a narrow clone | |
117 | opts[b'narrow'] = True |
|
117 | opts[b'narrow'] = True | |
118 | opts[b'include'].extend(includes) |
|
118 | opts[b'include'].extend(includes) | |
119 | opts[b'exclude'].extend(excludes) |
|
119 | opts[b'exclude'].extend(excludes) | |
120 |
|
120 | |||
121 | if opts[b'narrow']: |
|
121 | if opts[b'narrow']: | |
122 |
|
122 | |||
123 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
123 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): | |
124 | orig(pullop, kwargs) |
|
124 | orig(pullop, kwargs) | |
125 |
|
125 | |||
126 | if opts.get(b'depth'): |
|
126 | if opts.get(b'depth'): | |
127 | kwargs[b'depth'] = opts[b'depth'] |
|
127 | kwargs[b'depth'] = opts[b'depth'] | |
128 |
|
128 | |||
129 | wrappedextraprepare = extensions.wrappedfunction( |
|
129 | wrappedextraprepare = extensions.wrappedfunction( | |
130 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen |
|
130 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen | |
131 | ) |
|
131 | ) | |
132 |
|
132 | |||
133 | with wrappedextraprepare: |
|
133 | with wrappedextraprepare: | |
134 | return orig(ui, repo, *args, **pycompat.strkwargs(opts)) |
|
134 | return orig(ui, repo, *args, **pycompat.strkwargs(opts)) | |
135 |
|
135 | |||
136 |
|
136 | |||
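Since clonenarrowcmd() above feeds the --narrowspec file through sparse.parseconfig() and then validates the patterns (rejecting '%include' profiles), the file is expected to contain [include]/[exclude] sections of narrow patterns. A hypothetical spec and invocation, with placeholder paths and URL:

    # spec.txt (hypothetical contents)
    [include]
    path:src
    path:docs
    [exclude]
    path:src/generated

    $ hg clone --narrowspec spec.txt https://example.com/repo narrowed-clone

Passing the file implies --narrow, and its patterns are merged into the --include/--exclude options before the underlying clone runs.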
137 | def pullnarrowcmd(orig, ui, repo, *args, **opts): |
|
137 | def pullnarrowcmd(orig, ui, repo, *args, **opts): | |
138 | """Wraps pull command to allow modifying narrow spec.""" |
|
138 | """Wraps pull command to allow modifying narrow spec.""" | |
139 | wrappedextraprepare = util.nullcontextmanager() |
|
139 | wrappedextraprepare = util.nullcontextmanager() | |
140 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
140 | if requirements.NARROW_REQUIREMENT in repo.requirements: | |
141 |
|
141 | |||
142 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
142 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): | |
143 | orig(pullop, kwargs) |
|
143 | orig(pullop, kwargs) | |
144 | if opts.get('depth'): |
|
144 | if opts.get('depth'): | |
145 | kwargs[b'depth'] = opts['depth'] |
|
145 | kwargs[b'depth'] = opts['depth'] | |
146 |
|
146 | |||
147 | wrappedextraprepare = extensions.wrappedfunction( |
|
147 | wrappedextraprepare = extensions.wrappedfunction( | |
148 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen |
|
148 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen | |
149 | ) |
|
149 | ) | |
150 |
|
150 | |||
151 | with wrappedextraprepare: |
|
151 | with wrappedextraprepare: | |
152 | return orig(ui, repo, *args, **opts) |
|
152 | return orig(ui, repo, *args, **opts) | |
153 |
|
153 | |||
154 |
|
154 | |||
155 | def archivenarrowcmd(orig, ui, repo, *args, **opts): |
|
155 | def archivenarrowcmd(orig, ui, repo, *args, **opts): | |
156 | """Wraps archive command to narrow the default includes.""" |
|
156 | """Wraps archive command to narrow the default includes.""" | |
157 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
157 | if requirements.NARROW_REQUIREMENT in repo.requirements: | |
158 | repo_includes, repo_excludes = repo.narrowpats |
|
158 | repo_includes, repo_excludes = repo.narrowpats | |
159 | includes = set(opts.get('include', [])) |
|
159 | includes = set(opts.get('include', [])) | |
160 | excludes = set(opts.get('exclude', [])) |
|
160 | excludes = set(opts.get('exclude', [])) | |
161 | includes, excludes, unused_invalid = narrowspec.restrictpatterns( |
|
161 | includes, excludes, unused_invalid = narrowspec.restrictpatterns( | |
162 | includes, excludes, repo_includes, repo_excludes |
|
162 | includes, excludes, repo_includes, repo_excludes | |
163 | ) |
|
163 | ) | |
164 | if includes: |
|
164 | if includes: | |
165 | opts['include'] = includes |
|
165 | opts['include'] = includes | |
166 | if excludes: |
|
166 | if excludes: | |
167 | opts['exclude'] = excludes |
|
167 | opts['exclude'] = excludes | |
168 | return orig(ui, repo, *args, **opts) |
|
168 | return orig(ui, repo, *args, **opts) | |
169 |
|
169 | |||
170 |
|
170 | |||
171 | def pullbundle2extraprepare(orig, pullop, kwargs): |
|
171 | def pullbundle2extraprepare(orig, pullop, kwargs): | |
172 | repo = pullop.repo |
|
172 | repo = pullop.repo | |
173 | if requirements.NARROW_REQUIREMENT not in repo.requirements: |
|
173 | if requirements.NARROW_REQUIREMENT not in repo.requirements: | |
174 | return orig(pullop, kwargs) |
|
174 | return orig(pullop, kwargs) | |
175 |
|
175 | |||
176 | if wireprototypes.NARROWCAP not in pullop.remote.capabilities(): |
|
176 | if wireprototypes.NARROWCAP not in pullop.remote.capabilities(): | |
177 | raise error.Abort(_(b"server does not support narrow clones")) |
|
177 | raise error.Abort(_(b"server does not support narrow clones")) | |
178 | orig(pullop, kwargs) |
|
178 | orig(pullop, kwargs) | |
179 | kwargs[b'narrow'] = True |
|
179 | kwargs[b'narrow'] = True | |
180 | include, exclude = repo.narrowpats |
|
180 | include, exclude = repo.narrowpats | |
181 | kwargs[b'oldincludepats'] = include |
|
181 | kwargs[b'oldincludepats'] = include | |
182 | kwargs[b'oldexcludepats'] = exclude |
|
182 | kwargs[b'oldexcludepats'] = exclude | |
183 | if include: |
|
183 | if include: | |
184 | kwargs[b'includepats'] = include |
|
184 | kwargs[b'includepats'] = include | |
185 | if exclude: |
|
185 | if exclude: | |
186 | kwargs[b'excludepats'] = exclude |
|
186 | kwargs[b'excludepats'] = exclude | |
187 | # calculate known nodes only in ellipses cases because in non-ellipses cases |
|
187 | # calculate known nodes only in ellipses cases because in non-ellipses cases | |
188 | # we have all the nodes |
|
188 | # we have all the nodes | |
189 | if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities(): |
|
189 | if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities(): | |
190 | kwargs[b'known'] = [ |
|
190 | kwargs[b'known'] = [ | |
191 | hex(ctx.node()) |
|
191 | hex(ctx.node()) | |
192 | for ctx in repo.set(b'::%ln', pullop.common) |
|
192 | for ctx in repo.set(b'::%ln', pullop.common) | |
193 | if ctx.node() != nullid |
|
193 | if ctx.node() != nullid | |
194 | ] |
|
194 | ] | |
195 | if not kwargs[b'known']: |
|
195 | if not kwargs[b'known']: | |
196 | # Mercurial serializes an empty list as '' and deserializes it as |
|
196 | # Mercurial serializes an empty list as '' and deserializes it as | |
197 | # [''], so delete it instead to avoid handling the empty string on |
|
197 | # [''], so delete it instead to avoid handling the empty string on | |
198 | # the server. |
|
198 | # the server. | |
199 | del kwargs[b'known'] |
|
199 | del kwargs[b'known'] | |
200 |
|
200 | |||
201 |
|
201 | |||
202 | extensions.wrapfunction( |
|
202 | extensions.wrapfunction( | |
203 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare |
|
203 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare | |
204 | ) |
|
204 | ) | |
205 |
|
205 | |||
206 |
|
206 | |||
207 | def _narrow( |
|
207 | def _narrow( | |
208 | ui, |
|
208 | ui, | |
209 | repo, |
|
209 | repo, | |
210 | remote, |
|
210 | remote, | |
211 | commoninc, |
|
211 | commoninc, | |
212 | oldincludes, |
|
212 | oldincludes, | |
213 | oldexcludes, |
|
213 | oldexcludes, | |
214 | newincludes, |
|
214 | newincludes, | |
215 | newexcludes, |
|
215 | newexcludes, | |
216 | force, |
|
216 | force, | |
217 | backup, |
|
217 | backup, | |
218 | ): |
|
218 | ): | |
219 | oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes) |
|
219 | oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes) | |
220 | newmatch = narrowspec.match(repo.root, newincludes, newexcludes) |
|
220 | newmatch = narrowspec.match(repo.root, newincludes, newexcludes) | |
221 |
|
221 | |||
222 | # This is essentially doing "hg outgoing" to find all local-only |
|
222 | # This is essentially doing "hg outgoing" to find all local-only | |
223 | # commits. We will then check that the local-only commits don't |
|
223 | # commits. We will then check that the local-only commits don't | |
224 | # have any changes to files that will be untracked. |
|
224 | # have any changes to files that will be untracked. | |
225 | unfi = repo.unfiltered() |
|
225 | unfi = repo.unfiltered() | |
226 | outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc) |
|
226 | outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc) | |
227 | ui.status(_(b'looking for local changes to affected paths\n')) |
|
227 | ui.status(_(b'looking for local changes to affected paths\n')) | |
228 | localnodes = [] |
|
228 | localnodes = [] | |
229 | for n in itertools.chain(outgoing.missing, outgoing.excluded): |
|
229 | for n in itertools.chain(outgoing.missing, outgoing.excluded): | |
230 | if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()): |
|
230 | if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()): | |
231 | localnodes.append(n) |
|
231 | localnodes.append(n) | |
232 | revstostrip = unfi.revs(b'descendants(%ln)', localnodes) |
|
232 | revstostrip = unfi.revs(b'descendants(%ln)', localnodes) | |
233 | hiddenrevs = repoview.filterrevs(repo, b'visible') |
|
233 | hiddenrevs = repoview.filterrevs(repo, b'visible') | |
234 | visibletostrip = list( |
|
234 | visibletostrip = list( | |
235 | repo.changelog.node(r) for r in (revstostrip - hiddenrevs) |
|
235 | repo.changelog.node(r) for r in (revstostrip - hiddenrevs) | |
236 | ) |
|
236 | ) | |
237 | if visibletostrip: |
|
237 | if visibletostrip: | |
238 | ui.status( |
|
238 | ui.status( | |
239 | _( |
|
239 | _( | |
240 | b'The following changeset(s) or their ancestors have ' |
|
240 | b'The following changeset(s) or their ancestors have ' | |
241 | b'local changes not on the remote:\n' |
|
241 | b'local changes not on the remote:\n' | |
242 | ) |
|
242 | ) | |
243 | ) |
|
243 | ) | |
244 | maxnodes = 10 |
|
244 | maxnodes = 10 | |
245 | if ui.verbose or len(visibletostrip) <= maxnodes: |
|
245 | if ui.verbose or len(visibletostrip) <= maxnodes: | |
246 | for n in visibletostrip: |
|
246 | for n in visibletostrip: | |
247 | ui.status(b'%s\n' % short(n)) |
|
247 | ui.status(b'%s\n' % short(n)) | |
248 | else: |
|
248 | else: | |
249 | for n in visibletostrip[:maxnodes]: |
|
249 | for n in visibletostrip[:maxnodes]: | |
250 | ui.status(b'%s\n' % short(n)) |
|
250 | ui.status(b'%s\n' % short(n)) | |
251 | ui.status( |
|
251 | ui.status( | |
252 | _(b'...and %d more, use --verbose to list all\n') |
|
252 | _(b'...and %d more, use --verbose to list all\n') | |
253 | % (len(visibletostrip) - maxnodes) |
|
253 | % (len(visibletostrip) - maxnodes) | |
254 | ) |
|
254 | ) | |
255 | if not force: |
|
255 | if not force: | |
256 | raise error.StateError( |
|
256 | raise error.StateError( | |
257 | _(b'local changes found'), |
|
257 | _(b'local changes found'), | |
258 | hint=_(b'use --force-delete-local-changes to ignore'), |
|
258 | hint=_(b'use --force-delete-local-changes to ignore'), | |
259 | ) |
|
259 | ) | |
260 |
|
260 | |||
261 | with ui.uninterruptible(): |
|
261 | with ui.uninterruptible(): | |
262 | if revstostrip: |
|
262 | if revstostrip: | |
263 | tostrip = [unfi.changelog.node(r) for r in revstostrip] |
|
263 | tostrip = [unfi.changelog.node(r) for r in revstostrip] | |
264 | if repo[b'.'].node() in tostrip: |
|
264 | if repo[b'.'].node() in tostrip: | |
265 | # stripping working copy, so move to a different commit first |
|
265 | # stripping working copy, so move to a different commit first | |
266 | urev = max( |
|
266 | urev = max( | |
267 | repo.revs( |
|
267 | repo.revs( | |
268 | b'(::%n) - %ln + null', |
|
268 | b'(::%n) - %ln + null', | |
269 | repo[b'.'].node(), |
|
269 | repo[b'.'].node(), | |
270 | visibletostrip, |
|
270 | visibletostrip, | |
271 | ) |
|
271 | ) | |
272 | ) |
|
272 | ) | |
273 | hg.clean(repo, urev) |
|
273 | hg.clean(repo, urev) | |
274 | overrides = {(b'devel', b'strip-obsmarkers'): False} |
|
274 | overrides = {(b'devel', b'strip-obsmarkers'): False} | |
275 | with ui.configoverride(overrides, b'narrow'): |
|
275 | with ui.configoverride(overrides, b'narrow'): | |
276 | repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup) |
|
276 | repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup) | |
277 |
|
277 | |||
278 | todelete = [] |
|
278 | todelete = [] | |
279 | for f, f2, size in repo.store.datafiles(): |
|
279 | for t, f, f2, size in repo.store.datafiles(): | |
280 | if f.startswith(b'data/'): |
|
280 | if f.startswith(b'data/'): | |
281 | file = f[5:-2] |
|
281 | file = f[5:-2] | |
282 | if not newmatch(file): |
|
282 | if not newmatch(file): | |
283 | todelete.append(f) |
|
283 | todelete.append(f) | |
284 | elif f.startswith(b'meta/'): |
|
284 | elif f.startswith(b'meta/'): | |
285 | dir = f[5:-13] |
|
285 | dir = f[5:-13] | |
286 | dirs = sorted(pathutil.dirs({dir})) + [dir] |
|
286 | dirs = sorted(pathutil.dirs({dir})) + [dir] | |
287 | include = True |
|
287 | include = True | |
288 | for d in dirs: |
|
288 | for d in dirs: | |
289 | visit = newmatch.visitdir(d) |
|
289 | visit = newmatch.visitdir(d) | |
290 | if not visit: |
|
290 | if not visit: | |
291 | include = False |
|
291 | include = False | |
292 | break |
|
292 | break | |
293 | if visit == b'all': |
|
293 | if visit == b'all': | |
294 | break |
|
294 | break | |
295 | if not include: |
|
295 | if not include: | |
296 | todelete.append(f) |
|
296 | todelete.append(f) | |
297 |
|
297 | |||
298 | repo.destroying() |
|
298 | repo.destroying() | |
299 |
|
299 | |||
300 | with repo.transaction(b'narrowing'): |
|
300 | with repo.transaction(b'narrowing'): | |
301 | # Update narrowspec before removing revlogs, so repo won't be |
|
301 | # Update narrowspec before removing revlogs, so repo won't be | |
302 | # corrupt in case of crash |
|
302 | # corrupt in case of crash | |
303 | repo.setnarrowpats(newincludes, newexcludes) |
|
303 | repo.setnarrowpats(newincludes, newexcludes) | |
304 |
|
304 | |||
305 | for f in todelete: |
|
305 | for f in todelete: | |
306 | ui.status(_(b'deleting %s\n') % f) |
|
306 | ui.status(_(b'deleting %s\n') % f) | |
307 | util.unlinkpath(repo.svfs.join(f)) |
|
307 | util.unlinkpath(repo.svfs.join(f)) | |
308 | repo.store.markremoved(f) |
|
308 | repo.store.markremoved(f) | |
309 |
|
309 | |||
310 | narrowspec.updateworkingcopy(repo, assumeclean=True) |
|
310 | narrowspec.updateworkingcopy(repo, assumeclean=True) | |
311 | narrowspec.copytoworkingcopy(repo) |
|
311 | narrowspec.copytoworkingcopy(repo) | |
312 |
|
312 | |||
313 | repo.destroyed() |
|
313 | repo.destroyed() | |
314 |
|
314 | |||
315 |
|
315 | |||
316 | def _widen( |
|
316 | def _widen( | |
317 | ui, |
|
317 | ui, | |
318 | repo, |
|
318 | repo, | |
319 | remote, |
|
319 | remote, | |
320 | commoninc, |
|
320 | commoninc, | |
321 | oldincludes, |
|
321 | oldincludes, | |
322 | oldexcludes, |
|
322 | oldexcludes, | |
323 | newincludes, |
|
323 | newincludes, | |
324 | newexcludes, |
|
324 | newexcludes, | |
325 | ): |
|
325 | ): | |
326 | # for now we assume that if a server has ellipses enabled, we will be |
|
326 | # for now we assume that if a server has ellipses enabled, we will be | |
327 | # exchanging ellipses nodes. In the future we should add ellipses as a |
|
327 | # exchanging ellipses nodes. In the future we should add ellipses as a | |
328 | # client-side requirement (maybe) to distinguish whether a client is |
|
328 | # client-side requirement (maybe) to distinguish whether a client is | |
329 | # shallow, and then tell the server whether or not we want ellipses. |
|
329 | # shallow, and then tell the server whether or not we want ellipses. | |
330 | # Theoretically a non-ellipses repo should be able to use narrow |
|
330 | # Theoretically a non-ellipses repo should be able to use narrow | |
331 | # functionality from an ellipses-enabled server |
|
331 | # functionality from an ellipses-enabled server | |
332 | remotecap = remote.capabilities() |
|
332 | remotecap = remote.capabilities() | |
333 | ellipsesremote = any( |
|
333 | ellipsesremote = any( | |
334 | cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP |
|
334 | cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP | |
335 | ) |
|
335 | ) | |
336 |
|
336 | |||
337 | # check whether we are talking to a server which supports old version of |
|
337 | # check whether we are talking to a server which supports old version of | |
338 | # ellipses capabilities |
|
338 | # ellipses capabilities | |
339 | isoldellipses = ( |
|
339 | isoldellipses = ( | |
340 | ellipsesremote |
|
340 | ellipsesremote | |
341 | and wireprototypes.ELLIPSESCAP1 in remotecap |
|
341 | and wireprototypes.ELLIPSESCAP1 in remotecap | |
342 | and wireprototypes.ELLIPSESCAP not in remotecap |
|
342 | and wireprototypes.ELLIPSESCAP not in remotecap | |
343 | ) |
|
343 | ) | |
344 |
|
344 | |||
345 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
345 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): | |
346 | orig(pullop, kwargs) |
|
346 | orig(pullop, kwargs) | |
347 | # The old{in,ex}cludepats have already been set by orig() |
|
347 | # The old{in,ex}cludepats have already been set by orig() | |
348 | kwargs[b'includepats'] = newincludes |
|
348 | kwargs[b'includepats'] = newincludes | |
349 | kwargs[b'excludepats'] = newexcludes |
|
349 | kwargs[b'excludepats'] = newexcludes | |
350 |
|
350 | |||
351 | wrappedextraprepare = extensions.wrappedfunction( |
|
351 | wrappedextraprepare = extensions.wrappedfunction( | |
352 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen |
|
352 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen | |
353 | ) |
|
353 | ) | |
354 |
|
354 | |||
355 | # define a function that narrowbundle2 can call after creating the |
|
355 | # define a function that narrowbundle2 can call after creating the | |
356 | # backup bundle, but before applying the bundle from the server |
|
356 | # backup bundle, but before applying the bundle from the server | |
357 | def setnewnarrowpats(): |
|
357 | def setnewnarrowpats(): | |
358 | repo.setnarrowpats(newincludes, newexcludes) |
|
358 | repo.setnarrowpats(newincludes, newexcludes) | |
359 |
|
359 | |||
360 | repo.setnewnarrowpats = setnewnarrowpats |
|
360 | repo.setnewnarrowpats = setnewnarrowpats | |
361 | # silence the devel-warning of applying an empty changegroup |
|
361 | # silence the devel-warning of applying an empty changegroup | |
362 | overrides = {(b'devel', b'all-warnings'): False} |
|
362 | overrides = {(b'devel', b'all-warnings'): False} | |
363 |
|
363 | |||
364 | common = commoninc[0] |
|
364 | common = commoninc[0] | |
365 | with ui.uninterruptible(): |
|
365 | with ui.uninterruptible(): | |
366 | if ellipsesremote: |
|
366 | if ellipsesremote: | |
367 | ds = repo.dirstate |
|
367 | ds = repo.dirstate | |
368 | p1, p2 = ds.p1(), ds.p2() |
|
368 | p1, p2 = ds.p1(), ds.p2() | |
369 | with ds.parentchange(): |
|
369 | with ds.parentchange(): | |
370 | ds.setparents(nullid, nullid) |
|
370 | ds.setparents(nullid, nullid) | |
371 | if isoldellipses: |
|
371 | if isoldellipses: | |
372 | with wrappedextraprepare: |
|
372 | with wrappedextraprepare: | |
373 | exchange.pull(repo, remote, heads=common) |
|
373 | exchange.pull(repo, remote, heads=common) | |
374 | else: |
|
374 | else: | |
375 | known = [] |
|
375 | known = [] | |
376 | if ellipsesremote: |
|
376 | if ellipsesremote: | |
377 | known = [ |
|
377 | known = [ | |
378 | ctx.node() |
|
378 | ctx.node() | |
379 | for ctx in repo.set(b'::%ln', common) |
|
379 | for ctx in repo.set(b'::%ln', common) | |
380 | if ctx.node() != nullid |
|
380 | if ctx.node() != nullid | |
381 | ] |
|
381 | ] | |
382 | with remote.commandexecutor() as e: |
|
382 | with remote.commandexecutor() as e: | |
383 | bundle = e.callcommand( |
|
383 | bundle = e.callcommand( | |
384 | b'narrow_widen', |
|
384 | b'narrow_widen', | |
385 | { |
|
385 | { | |
386 | b'oldincludes': oldincludes, |
|
386 | b'oldincludes': oldincludes, | |
387 | b'oldexcludes': oldexcludes, |
|
387 | b'oldexcludes': oldexcludes, | |
388 | b'newincludes': newincludes, |
|
388 | b'newincludes': newincludes, | |
389 | b'newexcludes': newexcludes, |
|
389 | b'newexcludes': newexcludes, | |
390 | b'cgversion': b'03', |
|
390 | b'cgversion': b'03', | |
391 | b'commonheads': common, |
|
391 | b'commonheads': common, | |
392 | b'known': known, |
|
392 | b'known': known, | |
393 | b'ellipses': ellipsesremote, |
|
393 | b'ellipses': ellipsesremote, | |
394 | }, |
|
394 | }, | |
395 | ).result() |
|
395 | ).result() | |
396 |
|
396 | |||
397 | trmanager = exchange.transactionmanager( |
|
397 | trmanager = exchange.transactionmanager( | |
398 | repo, b'widen', remote.url() |
|
398 | repo, b'widen', remote.url() | |
399 | ) |
|
399 | ) | |
400 | with trmanager, repo.ui.configoverride(overrides, b'widen'): |
|
400 | with trmanager, repo.ui.configoverride(overrides, b'widen'): | |
401 | op = bundle2.bundleoperation( |
|
401 | op = bundle2.bundleoperation( | |
402 | repo, trmanager.transaction, source=b'widen' |
|
402 | repo, trmanager.transaction, source=b'widen' | |
403 | ) |
|
403 | ) | |
404 | # TODO: we should catch error.Abort here |
|
404 | # TODO: we should catch error.Abort here | |
405 | bundle2.processbundle(repo, bundle, op=op) |
|
405 | bundle2.processbundle(repo, bundle, op=op) | |
406 |
|
406 | |||
407 | if ellipsesremote: |
|
407 | if ellipsesremote: | |
408 | with ds.parentchange(): |
|
408 | with ds.parentchange(): | |
409 | ds.setparents(p1, p2) |
|
409 | ds.setparents(p1, p2) | |
410 |
|
410 | |||
411 | with repo.transaction(b'widening'): |
|
411 | with repo.transaction(b'widening'): | |
412 | repo.setnewnarrowpats() |
|
412 | repo.setnewnarrowpats() | |
413 | narrowspec.updateworkingcopy(repo) |
|
413 | narrowspec.updateworkingcopy(repo) | |
414 | narrowspec.copytoworkingcopy(repo) |
|
414 | narrowspec.copytoworkingcopy(repo) | |
415 |
|
415 | |||
416 |
|
416 | |||
417 | # TODO(rdamazio): Make new matcher format and update description |
|
417 | # TODO(rdamazio): Make new matcher format and update description | |
418 | @command( |
|
418 | @command( | |
419 | b'tracked', |
|
419 | b'tracked', | |
420 | [ |
|
420 | [ | |
421 | (b'', b'addinclude', [], _(b'new paths to include')), |
|
421 | (b'', b'addinclude', [], _(b'new paths to include')), | |
422 | (b'', b'removeinclude', [], _(b'old paths to no longer include')), |
|
422 | (b'', b'removeinclude', [], _(b'old paths to no longer include')), | |
423 | ( |
|
423 | ( | |
424 | b'', |
|
424 | b'', | |
425 | b'auto-remove-includes', |
|
425 | b'auto-remove-includes', | |
426 | False, |
|
426 | False, | |
427 | _(b'automatically choose unused includes to remove'), |
|
427 | _(b'automatically choose unused includes to remove'), | |
428 | ), |
|
428 | ), | |
429 | (b'', b'addexclude', [], _(b'new paths to exclude')), |
|
429 | (b'', b'addexclude', [], _(b'new paths to exclude')), | |
430 | (b'', b'import-rules', b'', _(b'import narrowspecs from a file')), |
|
430 | (b'', b'import-rules', b'', _(b'import narrowspecs from a file')), | |
431 | (b'', b'removeexclude', [], _(b'old paths to no longer exclude')), |
|
431 | (b'', b'removeexclude', [], _(b'old paths to no longer exclude')), | |
432 | ( |
|
432 | ( | |
433 | b'', |
|
433 | b'', | |
434 | b'clear', |
|
434 | b'clear', | |
435 | False, |
|
435 | False, | |
436 | _(b'whether to replace the existing narrowspec'), |
|
436 | _(b'whether to replace the existing narrowspec'), | |
437 | ), |
|
437 | ), | |
438 | ( |
|
438 | ( | |
439 | b'', |
|
439 | b'', | |
440 | b'force-delete-local-changes', |
|
440 | b'force-delete-local-changes', | |
441 | False, |
|
441 | False, | |
442 | _(b'forces deletion of local changes when narrowing'), |
|
442 | _(b'forces deletion of local changes when narrowing'), | |
443 | ), |
|
443 | ), | |
444 | ( |
|
444 | ( | |
445 | b'', |
|
445 | b'', | |
446 | b'backup', |
|
446 | b'backup', | |
447 | True, |
|
447 | True, | |
448 | _(b'back up local changes when narrowing'), |
|
448 | _(b'back up local changes when narrowing'), | |
449 | ), |
|
449 | ), | |
450 | ( |
|
450 | ( | |
451 | b'', |
|
451 | b'', | |
452 | b'update-working-copy', |
|
452 | b'update-working-copy', | |
453 | False, |
|
453 | False, | |
454 | _(b'update working copy when the store has changed'), |
|
454 | _(b'update working copy when the store has changed'), | |
455 | ), |
|
455 | ), | |
456 | ] |
|
456 | ] | |
457 | + commands.remoteopts, |
|
457 | + commands.remoteopts, | |
458 | _(b'[OPTIONS]... [REMOTE]'), |
|
458 | _(b'[OPTIONS]... [REMOTE]'), | |
459 | inferrepo=True, |
|
459 | inferrepo=True, | |
460 | helpcategory=command.CATEGORY_MAINTENANCE, |
|
460 | helpcategory=command.CATEGORY_MAINTENANCE, | |
461 | ) |
|
461 | ) | |
462 | def trackedcmd(ui, repo, remotepath=None, *pats, **opts): |
|
462 | def trackedcmd(ui, repo, remotepath=None, *pats, **opts): | |
463 | """show or change the current narrowspec |
|
463 | """show or change the current narrowspec | |
464 |
|
464 | |||
465 | With no argument, shows the current narrowspec entries, one per line. Each |
|
465 | With no argument, shows the current narrowspec entries, one per line. Each | |
466 | line will be prefixed with 'I' or 'X' for included or excluded patterns, |
|
466 | line will be prefixed with 'I' or 'X' for included or excluded patterns, | |
467 | respectively. |
|
467 | respectively. | |
468 |
|
468 | |||
469 | The narrowspec consists of expressions to match remote files and/or |
|
469 | The narrowspec consists of expressions to match remote files and/or | |
470 | directories that should be pulled into your client. |
|
470 | directories that should be pulled into your client. | |
471 | The narrowspec has *include* and *exclude* expressions, with excludes always |
|
471 | The narrowspec has *include* and *exclude* expressions, with excludes always | |
472 | trumping includes: that is, if a file matches an exclude expression, it will |
|
472 | trumping includes: that is, if a file matches an exclude expression, it will | |
473 | be excluded even if it also matches an include expression. |
|
473 | be excluded even if it also matches an include expression. | |
474 | Excluding files that were never included has no effect. |
|
474 | Excluding files that were never included has no effect. | |
475 |
|
475 | |||
476 | Each included or excluded entry is in the format described by |
|
476 | Each included or excluded entry is in the format described by | |
477 | 'hg help patterns'. |
|
477 | 'hg help patterns'. | |
478 |
|
478 | |||
479 | The options allow you to add or remove included and excluded expressions. |
|
479 | The options allow you to add or remove included and excluded expressions. | |
480 |
|
480 | |||
481 | If --clear is specified, then all previous includes and excludes are DROPPED |
|
481 | If --clear is specified, then all previous includes and excludes are DROPPED | |
482 | and replaced by the new ones specified to --addinclude and --addexclude. |
|
482 | and replaced by the new ones specified to --addinclude and --addexclude. | |
483 | If --clear is specified without any further options, the narrowspec will be |
|
483 | If --clear is specified without any further options, the narrowspec will be | |
484 | empty and will not match any files. |
|
484 | empty and will not match any files. | |
485 |
|
485 | |||
486 | If --auto-remove-includes is specified, then those includes that don't match |
|
486 | If --auto-remove-includes is specified, then those includes that don't match | |
487 | any files modified by currently visible local commits (those not shared by |
|
487 | any files modified by currently visible local commits (those not shared by | |
488 | the remote) will be added to the set of explicitly specified includes to |
|
488 | the remote) will be added to the set of explicitly specified includes to | |
489 | remove. |
|
489 | remove. | |
490 |
|
490 | |||
491 | --import-rules accepts a path to a file containing rules, allowing you to |
|
491 | --import-rules accepts a path to a file containing rules, allowing you to | |
492 | add --addinclude and --addexclude rules in bulk. Like the other include and |
|
492 | add --addinclude and --addexclude rules in bulk. Like the other include and | |
493 | exclude switches, the changes are applied immediately. |
|
493 | exclude switches, the changes are applied immediately. | |
494 | """ |
|
494 | """ | |
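Note: as a usage illustration of the docstring above (paths and file names are hypothetical; the flags are the ones declared in the command table), a session might look like:

    $ hg tracked                        # show current entries, prefixed with I/X
    I path:src/module
    X path:src/module/tests
    $ hg tracked --addinclude docs
    $ hg tracked --import-rules narrow-rules.txt   # bulk --addinclude/--addexclude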
495 | opts = pycompat.byteskwargs(opts) |
|
495 | opts = pycompat.byteskwargs(opts) | |
496 | if requirements.NARROW_REQUIREMENT not in repo.requirements: |
|
496 | if requirements.NARROW_REQUIREMENT not in repo.requirements: | |
497 | raise error.InputError( |
|
497 | raise error.InputError( | |
498 | _( |
|
498 | _( | |
499 | b'the tracked command is only supported on ' |
|
499 | b'the tracked command is only supported on ' | |
500 | b'repositories cloned with --narrow' |
|
500 | b'repositories cloned with --narrow' | |
501 | ) |
|
501 | ) | |
502 | ) |
|
502 | ) | |
503 |
|
503 | |||
504 | # Before supporting it, decide whether "hg tracked --clear" should mean |
|
504 | # Before supporting it, decide whether "hg tracked --clear" should mean | |
505 | # tracking no paths or all paths. |
|
505 | # tracking no paths or all paths. | |
506 | if opts[b'clear']: |
|
506 | if opts[b'clear']: | |
507 | raise error.InputError(_(b'the --clear option is not yet supported')) |
|
507 | raise error.InputError(_(b'the --clear option is not yet supported')) | |
508 |
|
508 | |||
509 | # import rules from a file |
|
509 | # import rules from a file | |
510 | newrules = opts.get(b'import_rules') |
|
510 | newrules = opts.get(b'import_rules') | |
511 | if newrules: |
|
511 | if newrules: | |
512 | try: |
|
512 | try: | |
513 | filepath = os.path.join(encoding.getcwd(), newrules) |
|
513 | filepath = os.path.join(encoding.getcwd(), newrules) | |
514 | fdata = util.readfile(filepath) |
|
514 | fdata = util.readfile(filepath) | |
515 | except IOError as inst: |
|
515 | except IOError as inst: | |
516 | raise error.StorageError( |
|
516 | raise error.StorageError( | |
517 | _(b"cannot read narrowspecs from '%s': %s") |
|
517 | _(b"cannot read narrowspecs from '%s': %s") | |
518 | % (filepath, encoding.strtolocal(inst.strerror)) |
|
518 | % (filepath, encoding.strtolocal(inst.strerror)) | |
519 | ) |
|
519 | ) | |
520 | includepats, excludepats, profiles = sparse.parseconfig( |
|
520 | includepats, excludepats, profiles = sparse.parseconfig( | |
521 | ui, fdata, b'narrow' |
|
521 | ui, fdata, b'narrow' | |
522 | ) |
|
522 | ) | |
523 | if profiles: |
|
523 | if profiles: | |
524 | raise error.InputError( |
|
524 | raise error.InputError( | |
525 | _( |
|
525 | _( | |
526 | b"including other spec files using '%include' " |
|
526 | b"including other spec files using '%include' " | |
527 | b"is not supported in narrowspec" |
|
527 | b"is not supported in narrowspec" | |
528 | ) |
|
528 | ) | |
529 | ) |
|
529 | ) | |
530 | opts[b'addinclude'].extend(includepats) |
|
530 | opts[b'addinclude'].extend(includepats) | |
531 | opts[b'addexclude'].extend(excludepats) |
|
531 | opts[b'addexclude'].extend(excludepats) | |
532 |
|
532 | |||
533 | addedincludes = narrowspec.parsepatterns(opts[b'addinclude']) |
|
533 | addedincludes = narrowspec.parsepatterns(opts[b'addinclude']) | |
534 | removedincludes = narrowspec.parsepatterns(opts[b'removeinclude']) |
|
534 | removedincludes = narrowspec.parsepatterns(opts[b'removeinclude']) | |
535 | addedexcludes = narrowspec.parsepatterns(opts[b'addexclude']) |
|
535 | addedexcludes = narrowspec.parsepatterns(opts[b'addexclude']) | |
536 | removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude']) |
|
536 | removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude']) | |
537 | autoremoveincludes = opts[b'auto_remove_includes'] |
|
537 | autoremoveincludes = opts[b'auto_remove_includes'] | |
538 |
|
538 | |||
539 | update_working_copy = opts[b'update_working_copy'] |
|
539 | update_working_copy = opts[b'update_working_copy'] | |
540 | only_show = not ( |
|
540 | only_show = not ( | |
541 | addedincludes |
|
541 | addedincludes | |
542 | or removedincludes |
|
542 | or removedincludes | |
543 | or addedexcludes |
|
543 | or addedexcludes | |
544 | or removedexcludes |
|
544 | or removedexcludes | |
545 | or newrules |
|
545 | or newrules | |
546 | or autoremoveincludes |
|
546 | or autoremoveincludes | |
547 | or update_working_copy |
|
547 | or update_working_copy | |
548 | ) |
|
548 | ) | |
549 |
|
549 | |||
550 | oldincludes, oldexcludes = repo.narrowpats |
|
550 | oldincludes, oldexcludes = repo.narrowpats | |
551 |
|
551 | |||
552 | # filter the user passed additions and deletions into actual additions and |
|
552 | # filter the user passed additions and deletions into actual additions and | |
553 | # deletions of excludes and includes |
|
553 | # deletions of excludes and includes | |
554 | addedincludes -= oldincludes |
|
554 | addedincludes -= oldincludes | |
555 | removedincludes &= oldincludes |
|
555 | removedincludes &= oldincludes | |
556 | addedexcludes -= oldexcludes |
|
556 | addedexcludes -= oldexcludes | |
557 | removedexcludes &= oldexcludes |
|
557 | removedexcludes &= oldexcludes | |
558 |
|
558 | |||
559 | widening = addedincludes or removedexcludes |
|
559 | widening = addedincludes or removedexcludes | |
560 | narrowing = removedincludes or addedexcludes |
|
560 | narrowing = removedincludes or addedexcludes | |
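Note: the four set operations above are easier to read with a small, purely illustrative example (hypothetical patterns, plain Python set semantics):

    # sketch only: how user-supplied patterns are filtered against the old spec
    oldincludes = {b'path:src', b'path:docs'}
    addedincludes = {b'path:src', b'path:tools'} - oldincludes    # {b'path:tools'}: only genuinely new includes survive
    removedincludes = {b'path:docs', b'path:gone'} & oldincludes  # {b'path:docs'}: only includes that actually exist
    widening = bool(addedincludes)     # adding includes (or dropping excludes) widens
    narrowing = bool(removedincludes)  # dropping includes (or adding excludes) narrows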
561 |
|
561 | |||
562 | # Only print the current narrowspec. |
|
562 | # Only print the current narrowspec. | |
563 | if only_show: |
|
563 | if only_show: | |
564 | ui.pager(b'tracked') |
|
564 | ui.pager(b'tracked') | |
565 | fm = ui.formatter(b'narrow', opts) |
|
565 | fm = ui.formatter(b'narrow', opts) | |
566 | for i in sorted(oldincludes): |
|
566 | for i in sorted(oldincludes): | |
567 | fm.startitem() |
|
567 | fm.startitem() | |
568 | fm.write(b'status', b'%s ', b'I', label=b'narrow.included') |
|
568 | fm.write(b'status', b'%s ', b'I', label=b'narrow.included') | |
569 | fm.write(b'pat', b'%s\n', i, label=b'narrow.included') |
|
569 | fm.write(b'pat', b'%s\n', i, label=b'narrow.included') | |
570 | for i in sorted(oldexcludes): |
|
570 | for i in sorted(oldexcludes): | |
571 | fm.startitem() |
|
571 | fm.startitem() | |
572 | fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded') |
|
572 | fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded') | |
573 | fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded') |
|
573 | fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded') | |
574 | fm.end() |
|
574 | fm.end() | |
575 | return 0 |
|
575 | return 0 | |
576 |
|
576 | |||
577 | if update_working_copy: |
|
577 | if update_working_copy: | |
578 | with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'): |
|
578 | with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'): | |
579 | narrowspec.updateworkingcopy(repo) |
|
579 | narrowspec.updateworkingcopy(repo) | |
580 | narrowspec.copytoworkingcopy(repo) |
|
580 | narrowspec.copytoworkingcopy(repo) | |
581 | return 0 |
|
581 | return 0 | |
582 |
|
582 | |||
583 | if not (widening or narrowing or autoremoveincludes): |
|
583 | if not (widening or narrowing or autoremoveincludes): | |
584 | ui.status(_(b"nothing to widen or narrow\n")) |
|
584 | ui.status(_(b"nothing to widen or narrow\n")) | |
585 | return 0 |
|
585 | return 0 | |
586 |
|
586 | |||
587 | with repo.wlock(), repo.lock(): |
|
587 | with repo.wlock(), repo.lock(): | |
588 | cmdutil.bailifchanged(repo) |
|
588 | cmdutil.bailifchanged(repo) | |
589 |
|
589 | |||
590 | # Find the revisions we have in common with the remote. These will |
|
590 | # Find the revisions we have in common with the remote. These will | |
591 | # be used for finding local-only changes for narrowing. They will |
|
591 | # be used for finding local-only changes for narrowing. They will | |
592 | # also define the set of revisions to update for widening. |
|
592 | # also define the set of revisions to update for widening. | |
593 | remotepath = ui.expandpath(remotepath or b'default') |
|
593 | remotepath = ui.expandpath(remotepath or b'default') | |
594 | url, branches = hg.parseurl(remotepath) |
|
594 | url, branches = hg.parseurl(remotepath) | |
595 | ui.status(_(b'comparing with %s\n') % util.hidepassword(url)) |
|
595 | ui.status(_(b'comparing with %s\n') % util.hidepassword(url)) | |
596 | remote = hg.peer(repo, opts, url) |
|
596 | remote = hg.peer(repo, opts, url) | |
597 |
|
597 | |||
598 | try: |
|
598 | try: | |
599 | # check narrow support before doing anything if widening needs to be |
|
599 | # check narrow support before doing anything if widening needs to be | |
600 | # performed. In the future we should also abort if the client is using |
|
600 | # performed. In the future we should also abort if the client is using | |
601 | # ellipses and the server does not support them |
|
601 | # ellipses and the server does not support them | |
602 | if ( |
|
602 | if ( | |
603 | widening |
|
603 | widening | |
604 | and wireprototypes.NARROWCAP not in remote.capabilities() |
|
604 | and wireprototypes.NARROWCAP not in remote.capabilities() | |
605 | ): |
|
605 | ): | |
606 | raise error.Abort(_(b"server does not support narrow clones")) |
|
606 | raise error.Abort(_(b"server does not support narrow clones")) | |
607 |
|
607 | |||
608 | commoninc = discovery.findcommonincoming(repo, remote) |
|
608 | commoninc = discovery.findcommonincoming(repo, remote) | |
609 |
|
609 | |||
610 | if autoremoveincludes: |
|
610 | if autoremoveincludes: | |
611 | outgoing = discovery.findcommonoutgoing( |
|
611 | outgoing = discovery.findcommonoutgoing( | |
612 | repo, remote, commoninc=commoninc |
|
612 | repo, remote, commoninc=commoninc | |
613 | ) |
|
613 | ) | |
614 | ui.status(_(b'looking for unused includes to remove\n')) |
|
614 | ui.status(_(b'looking for unused includes to remove\n')) | |
615 | localfiles = set() |
|
615 | localfiles = set() | |
616 | for n in itertools.chain(outgoing.missing, outgoing.excluded): |
|
616 | for n in itertools.chain(outgoing.missing, outgoing.excluded): | |
617 | localfiles.update(repo[n].files()) |
|
617 | localfiles.update(repo[n].files()) | |
618 | suggestedremovals = [] |
|
618 | suggestedremovals = [] | |
619 | for include in sorted(oldincludes): |
|
619 | for include in sorted(oldincludes): | |
620 | match = narrowspec.match(repo.root, [include], oldexcludes) |
|
620 | match = narrowspec.match(repo.root, [include], oldexcludes) | |
621 | if not any(match(f) for f in localfiles): |
|
621 | if not any(match(f) for f in localfiles): | |
622 | suggestedremovals.append(include) |
|
622 | suggestedremovals.append(include) | |
623 | if suggestedremovals: |
|
623 | if suggestedremovals: | |
624 | for s in suggestedremovals: |
|
624 | for s in suggestedremovals: | |
625 | ui.status(b'%s\n' % s) |
|
625 | ui.status(b'%s\n' % s) | |
626 | if ( |
|
626 | if ( | |
627 | ui.promptchoice( |
|
627 | ui.promptchoice( | |
628 | _( |
|
628 | _( | |
629 | b'remove these unused includes (yn)?' |
|
629 | b'remove these unused includes (yn)?' | |
630 | b'$$ &Yes $$ &No' |
|
630 | b'$$ &Yes $$ &No' | |
631 | ) |
|
631 | ) | |
632 | ) |
|
632 | ) | |
633 | == 0 |
|
633 | == 0 | |
634 | ): |
|
634 | ): | |
635 | removedincludes.update(suggestedremovals) |
|
635 | removedincludes.update(suggestedremovals) | |
636 | narrowing = True |
|
636 | narrowing = True | |
637 | else: |
|
637 | else: | |
638 | ui.status(_(b'found no unused includes\n')) |
|
638 | ui.status(_(b'found no unused includes\n')) | |
639 |
|
639 | |||
640 | if narrowing: |
|
640 | if narrowing: | |
641 | newincludes = oldincludes - removedincludes |
|
641 | newincludes = oldincludes - removedincludes | |
642 | newexcludes = oldexcludes | addedexcludes |
|
642 | newexcludes = oldexcludes | addedexcludes | |
643 | _narrow( |
|
643 | _narrow( | |
644 | ui, |
|
644 | ui, | |
645 | repo, |
|
645 | repo, | |
646 | remote, |
|
646 | remote, | |
647 | commoninc, |
|
647 | commoninc, | |
648 | oldincludes, |
|
648 | oldincludes, | |
649 | oldexcludes, |
|
649 | oldexcludes, | |
650 | newincludes, |
|
650 | newincludes, | |
651 | newexcludes, |
|
651 | newexcludes, | |
652 | opts[b'force_delete_local_changes'], |
|
652 | opts[b'force_delete_local_changes'], | |
653 | opts[b'backup'], |
|
653 | opts[b'backup'], | |
654 | ) |
|
654 | ) | |
655 | # _narrow() updated the narrowspec and _widen() below needs to |
|
655 | # _narrow() updated the narrowspec and _widen() below needs to | |
656 | # use the updated values as its base (otherwise removed includes |
|
656 | # use the updated values as its base (otherwise removed includes | |
657 | # and addedexcludes will be lost in the resulting narrowspec) |
|
657 | # and addedexcludes will be lost in the resulting narrowspec) | |
658 | oldincludes = newincludes |
|
658 | oldincludes = newincludes | |
659 | oldexcludes = newexcludes |
|
659 | oldexcludes = newexcludes | |
660 |
|
660 | |||
661 | if widening: |
|
661 | if widening: | |
662 | newincludes = oldincludes | addedincludes |
|
662 | newincludes = oldincludes | addedincludes | |
663 | newexcludes = oldexcludes - removedexcludes |
|
663 | newexcludes = oldexcludes - removedexcludes | |
664 | _widen( |
|
664 | _widen( | |
665 | ui, |
|
665 | ui, | |
666 | repo, |
|
666 | repo, | |
667 | remote, |
|
667 | remote, | |
668 | commoninc, |
|
668 | commoninc, | |
669 | oldincludes, |
|
669 | oldincludes, | |
670 | oldexcludes, |
|
670 | oldexcludes, | |
671 | newincludes, |
|
671 | newincludes, | |
672 | newexcludes, |
|
672 | newexcludes, | |
673 | ) |
|
673 | ) | |
674 | finally: |
|
674 | finally: | |
675 | remote.close() |
|
675 | remote.close() | |
676 |
|
676 | |||
677 | return 0 |
|
677 | return 0 |
@@ -1,386 +1,386 | |||||
1 | from __future__ import absolute_import |
|
1 | from __future__ import absolute_import | |
2 |
|
2 | |||
3 | import threading |
|
3 | import threading | |
4 |
|
4 | |||
5 | from mercurial.node import hex, nullid |
|
5 | from mercurial.node import hex, nullid | |
6 | from mercurial.pycompat import getattr |
|
6 | from mercurial.pycompat import getattr | |
7 | from mercurial import ( |
|
7 | from mercurial import ( | |
8 | mdiff, |
|
8 | mdiff, | |
9 | pycompat, |
|
9 | pycompat, | |
10 | revlog, |
|
10 | revlog, | |
11 | ) |
|
11 | ) | |
12 | from . import ( |
|
12 | from . import ( | |
13 | basestore, |
|
13 | basestore, | |
14 | constants, |
|
14 | constants, | |
15 | shallowutil, |
|
15 | shallowutil, | |
16 | ) |
|
16 | ) | |
17 |
|
17 | |||
18 |
|
18 | |||
19 | class ChainIndicies(object): |
|
19 | class ChainIndicies(object): | |
20 | """A static class for easy reference to the delta chain indices.""" |
|
20 | """A static class for easy reference to the delta chain indices.""" | |
21 |
|
21 | |||
22 | # The filename of this revision delta |
|
22 | # The filename of this revision delta | |
23 | NAME = 0 |
|
23 | NAME = 0 | |
24 | # The mercurial file node for this revision delta |
|
24 | # The mercurial file node for this revision delta | |
25 | NODE = 1 |
|
25 | NODE = 1 | |
26 | # The filename of the delta base's revision. This is useful when the delta |
|
26 | # The filename of the delta base's revision. This is useful when the delta | |
27 | # is computed between different files (e.g. after a move or copy, where we |
|
27 | # is computed between different files (e.g. after a move or copy, where we | |
28 | # can delta against the original file's content). |
|
28 | # can delta against the original file's content). | |
29 | BASENAME = 2 |
|
29 | BASENAME = 2 | |
30 | # The mercurial file node for the delta base revision. This is the nullid if |
|
30 | # The mercurial file node for the delta base revision. This is the nullid if | |
31 | # this delta is a full text. |
|
31 | # this delta is a full text. | |
32 | BASENODE = 3 |
|
32 | BASENODE = 3 | |
33 | # The actual delta or full text data. |
|
33 | # The actual delta or full text data. | |
34 | DATA = 4 |
|
34 | DATA = 4 | |
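Note: each delta chain entry is a plain 5-tuple, so these constants are just readable indices into it. A minimal sketch, with made-up placeholder values:

    # illustration only: reading one chain entry with the constants above
    node = b'\x11' * 20       # hypothetical file node
    basenode = b'\x22' * 20   # hypothetical delta-base node
    entry = (b'foo.txt', node, b'foo.txt', basenode, b'<delta bytes>')
    assert entry[ChainIndicies.NAME] == b'foo.txt'
    assert entry[ChainIndicies.BASENODE] == basenode  # nullid here would mean a full text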
35 |
|
35 | |||
36 |
|
36 | |||
37 | class unioncontentstore(basestore.baseunionstore): |
|
37 | class unioncontentstore(basestore.baseunionstore): | |
38 | def __init__(self, *args, **kwargs): |
|
38 | def __init__(self, *args, **kwargs): | |
39 | super(unioncontentstore, self).__init__(*args, **kwargs) |
|
39 | super(unioncontentstore, self).__init__(*args, **kwargs) | |
40 |
|
40 | |||
41 | self.stores = args |
|
41 | self.stores = args | |
42 | self.writestore = kwargs.get('writestore') |
|
42 | self.writestore = kwargs.get('writestore') | |
43 |
|
43 | |||
44 | # If allowincomplete==True then the union store can return partial |
|
44 | # If allowincomplete==True then the union store can return partial | |
45 | # delta chains, otherwise it will throw a KeyError if a full |
|
45 | # delta chains, otherwise it will throw a KeyError if a full | |
46 | # deltachain can't be found. |
|
46 | # deltachain can't be found. | |
47 | self.allowincomplete = kwargs.get('allowincomplete', False) |
|
47 | self.allowincomplete = kwargs.get('allowincomplete', False) | |
48 |
|
48 | |||
49 | def get(self, name, node): |
|
49 | def get(self, name, node): | |
50 | """Fetches the full text revision contents of the given name+node pair. |
|
50 | """Fetches the full text revision contents of the given name+node pair. | |
51 | If the full text doesn't exist, throws a KeyError. |
|
51 | If the full text doesn't exist, throws a KeyError. | |
52 |
|
52 | |||
53 | Under the hood, this uses getdeltachain() across all the stores to build |
|
53 | Under the hood, this uses getdeltachain() across all the stores to build | |
54 | up a full chain to produce the full text. |
|
54 | up a full chain to produce the full text. | |
55 | """ |
|
55 | """ | |
56 | chain = self.getdeltachain(name, node) |
|
56 | chain = self.getdeltachain(name, node) | |
57 |
|
57 | |||
58 | if chain[-1][ChainIndicies.BASENODE] != nullid: |
|
58 | if chain[-1][ChainIndicies.BASENODE] != nullid: | |
59 | # If we didn't receive a full chain, throw |
|
59 | # If we didn't receive a full chain, throw | |
60 | raise KeyError((name, hex(node))) |
|
60 | raise KeyError((name, hex(node))) | |
61 |
|
61 | |||
62 | # The last entry in the chain is a full text, so we start our delta |
|
62 | # The last entry in the chain is a full text, so we start our delta | |
63 | # application with that. |
|
63 | # application with that. | |
64 | fulltext = chain.pop()[ChainIndicies.DATA] |
|
64 | fulltext = chain.pop()[ChainIndicies.DATA] | |
65 |
|
65 | |||
66 | text = fulltext |
|
66 | text = fulltext | |
67 | while chain: |
|
67 | while chain: | |
68 | delta = chain.pop()[ChainIndicies.DATA] |
|
68 | delta = chain.pop()[ChainIndicies.DATA] | |
69 | text = mdiff.patches(text, [delta]) |
|
69 | text = mdiff.patches(text, [delta]) | |
70 |
|
70 | |||
71 | return text |
|
71 | return text | |
72 |
|
72 | |||
73 | @basestore.baseunionstore.retriable |
|
73 | @basestore.baseunionstore.retriable | |
74 | def getdelta(self, name, node): |
|
74 | def getdelta(self, name, node): | |
75 | """Return the single delta entry for the given name/node pair.""" |
|
75 | """Return the single delta entry for the given name/node pair.""" | |
76 | for store in self.stores: |
|
76 | for store in self.stores: | |
77 | try: |
|
77 | try: | |
78 | return store.getdelta(name, node) |
|
78 | return store.getdelta(name, node) | |
79 | except KeyError: |
|
79 | except KeyError: | |
80 | pass |
|
80 | pass | |
81 |
|
81 | |||
82 | raise KeyError((name, hex(node))) |
|
82 | raise KeyError((name, hex(node))) | |
83 |
|
83 | |||
84 | def getdeltachain(self, name, node): |
|
84 | def getdeltachain(self, name, node): | |
85 | """Returns the deltachain for the given name/node pair. |
|
85 | """Returns the deltachain for the given name/node pair. | |
86 |
|
86 | |||
87 | Returns an ordered list of: |
|
87 | Returns an ordered list of: | |
88 |
|
88 | |||
89 | [(name, node, deltabasename, deltabasenode, deltacontent),...] |
|
89 | [(name, node, deltabasename, deltabasenode, deltacontent),...] | |
90 |
|
90 | |||
91 | where the chain is terminated by a full text entry with a nullid |
|
91 | where the chain is terminated by a full text entry with a nullid | |
92 | deltabasenode. |
|
92 | deltabasenode. | |
93 | """ |
|
93 | """ | |
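Note: concretely, the documented return shape for a short chain might look like the sketch below (file name, nodes and delta payloads are invented; the entry with a nullid delta base terminates the chain):

    # sketch of the return value, not real data
    chain = [
        (b'foo.txt', b'<node2>', b'foo.txt', b'<node1>', b'<delta of 2 against 1>'),  # delta entry
        (b'foo.txt', b'<node1>', None, nullid, b'<full text of node 1>'),             # terminating full text
    ]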
94 | chain = self._getpartialchain(name, node) |
|
94 | chain = self._getpartialchain(name, node) | |
95 | while chain[-1][ChainIndicies.BASENODE] != nullid: |
|
95 | while chain[-1][ChainIndicies.BASENODE] != nullid: | |
96 | x, x, deltabasename, deltabasenode, x = chain[-1] |
|
96 | x, x, deltabasename, deltabasenode, x = chain[-1] | |
97 | try: |
|
97 | try: | |
98 | morechain = self._getpartialchain(deltabasename, deltabasenode) |
|
98 | morechain = self._getpartialchain(deltabasename, deltabasenode) | |
99 | chain.extend(morechain) |
|
99 | chain.extend(morechain) | |
100 | except KeyError: |
|
100 | except KeyError: | |
101 | # If we allow incomplete chains, don't throw. |
|
101 | # If we allow incomplete chains, don't throw. | |
102 | if not self.allowincomplete: |
|
102 | if not self.allowincomplete: | |
103 | raise |
|
103 | raise | |
104 | break |
|
104 | break | |
105 |
|
105 | |||
106 | return chain |
|
106 | return chain | |
107 |
|
107 | |||
108 | @basestore.baseunionstore.retriable |
|
108 | @basestore.baseunionstore.retriable | |
109 | def getmeta(self, name, node): |
|
109 | def getmeta(self, name, node): | |
110 | """Returns the metadata dict for given node.""" |
|
110 | """Returns the metadata dict for given node.""" | |
111 | for store in self.stores: |
|
111 | for store in self.stores: | |
112 | try: |
|
112 | try: | |
113 | return store.getmeta(name, node) |
|
113 | return store.getmeta(name, node) | |
114 | except KeyError: |
|
114 | except KeyError: | |
115 | pass |
|
115 | pass | |
116 | raise KeyError((name, hex(node))) |
|
116 | raise KeyError((name, hex(node))) | |
117 |
|
117 | |||
118 | def getmetrics(self): |
|
118 | def getmetrics(self): | |
119 | metrics = [s.getmetrics() for s in self.stores] |
|
119 | metrics = [s.getmetrics() for s in self.stores] | |
120 | return shallowutil.sumdicts(*metrics) |
|
120 | return shallowutil.sumdicts(*metrics) | |
121 |
|
121 | |||
122 | @basestore.baseunionstore.retriable |
|
122 | @basestore.baseunionstore.retriable | |
123 | def _getpartialchain(self, name, node): |
|
123 | def _getpartialchain(self, name, node): | |
124 | """Returns a partial delta chain for the given name/node pair. |
|
124 | """Returns a partial delta chain for the given name/node pair. | |
125 |
|
125 | |||
126 | A partial chain is a chain that may not be terminated in a full-text. |
|
126 | A partial chain is a chain that may not be terminated in a full-text. | |
127 | """ |
|
127 | """ | |
128 | for store in self.stores: |
|
128 | for store in self.stores: | |
129 | try: |
|
129 | try: | |
130 | return store.getdeltachain(name, node) |
|
130 | return store.getdeltachain(name, node) | |
131 | except KeyError: |
|
131 | except KeyError: | |
132 | pass |
|
132 | pass | |
133 |
|
133 | |||
134 | raise KeyError((name, hex(node))) |
|
134 | raise KeyError((name, hex(node))) | |
135 |
|
135 | |||
136 | def add(self, name, node, data): |
|
136 | def add(self, name, node, data): | |
137 | raise RuntimeError( |
|
137 | raise RuntimeError( | |
138 | b"cannot add content only to remotefilelog contentstore" |
|
138 | b"cannot add content only to remotefilelog contentstore" | |
139 | ) |
|
139 | ) | |
140 |
|
140 | |||
141 | def getmissing(self, keys): |
|
141 | def getmissing(self, keys): | |
142 | missing = keys |
|
142 | missing = keys | |
143 | for store in self.stores: |
|
143 | for store in self.stores: | |
144 | if missing: |
|
144 | if missing: | |
145 | missing = store.getmissing(missing) |
|
145 | missing = store.getmissing(missing) | |
146 | return missing |
|
146 | return missing | |
147 |
|
147 | |||
148 | def addremotefilelognode(self, name, node, data): |
|
148 | def addremotefilelognode(self, name, node, data): | |
149 | if self.writestore: |
|
149 | if self.writestore: | |
150 | self.writestore.addremotefilelognode(name, node, data) |
|
150 | self.writestore.addremotefilelognode(name, node, data) | |
151 | else: |
|
151 | else: | |
152 | raise RuntimeError(b"no writable store configured") |
|
152 | raise RuntimeError(b"no writable store configured") | |
153 |
|
153 | |||
154 | def markledger(self, ledger, options=None): |
|
154 | def markledger(self, ledger, options=None): | |
155 | for store in self.stores: |
|
155 | for store in self.stores: | |
156 | store.markledger(ledger, options) |
|
156 | store.markledger(ledger, options) | |
157 |
|
157 | |||
158 |
|
158 | |||
159 | class remotefilelogcontentstore(basestore.basestore): |
|
159 | class remotefilelogcontentstore(basestore.basestore): | |
160 | def __init__(self, *args, **kwargs): |
|
160 | def __init__(self, *args, **kwargs): | |
161 | super(remotefilelogcontentstore, self).__init__(*args, **kwargs) |
|
161 | super(remotefilelogcontentstore, self).__init__(*args, **kwargs) | |
162 | self._threaddata = threading.local() |
|
162 | self._threaddata = threading.local() | |
163 |
|
163 | |||
164 | def get(self, name, node): |
|
164 | def get(self, name, node): | |
165 | # return raw revision text |
|
165 | # return raw revision text | |
166 | data = self._getdata(name, node) |
|
166 | data = self._getdata(name, node) | |
167 |
|
167 | |||
168 | offset, size, flags = shallowutil.parsesizeflags(data) |
|
168 | offset, size, flags = shallowutil.parsesizeflags(data) | |
169 | content = data[offset : offset + size] |
|
169 | content = data[offset : offset + size] | |
170 |
|
170 | |||
171 | ancestormap = shallowutil.ancestormap(data) |
|
171 | ancestormap = shallowutil.ancestormap(data) | |
172 | p1, p2, linknode, copyfrom = ancestormap[node] |
|
172 | p1, p2, linknode, copyfrom = ancestormap[node] | |
173 | copyrev = None |
|
173 | copyrev = None | |
174 | if copyfrom: |
|
174 | if copyfrom: | |
175 | copyrev = hex(p1) |
|
175 | copyrev = hex(p1) | |
176 |
|
176 | |||
177 | self._updatemetacache(node, size, flags) |
|
177 | self._updatemetacache(node, size, flags) | |
178 |
|
178 | |||
179 | # lfs tracks renames in its own metadata, so remove the hg copy metadata |
|
179 | # lfs tracks renames in its own metadata, so remove the hg copy metadata | |
180 | # here; it will be re-added by the lfs flag processor. |
|
180 | # here; it will be re-added by the lfs flag processor. | |
181 | if flags & revlog.REVIDX_EXTSTORED: |
|
181 | if flags & revlog.REVIDX_EXTSTORED: | |
182 | copyrev = copyfrom = None |
|
182 | copyrev = copyfrom = None | |
183 | revision = shallowutil.createrevlogtext(content, copyfrom, copyrev) |
|
183 | revision = shallowutil.createrevlogtext(content, copyfrom, copyrev) | |
184 | return revision |
|
184 | return revision | |
185 |
|
185 | |||
186 | def getdelta(self, name, node): |
|
186 | def getdelta(self, name, node): | |
187 | # Since remotefilelog content stores only contain full texts, just |
|
187 | # Since remotefilelog content stores only contain full texts, just | |
188 | # return that. |
|
188 | # return that. | |
189 | revision = self.get(name, node) |
|
189 | revision = self.get(name, node) | |
190 | return revision, name, nullid, self.getmeta(name, node) |
|
190 | return revision, name, nullid, self.getmeta(name, node) | |
191 |
|
191 | |||
192 | def getdeltachain(self, name, node): |
|
192 | def getdeltachain(self, name, node): | |
193 | # Since remotefilelog content stores just contain full texts, we return |
|
193 | # Since remotefilelog content stores just contain full texts, we return | |
194 | # a fake delta chain that just consists of a single full text revision. |
|
194 | # a fake delta chain that just consists of a single full text revision. | |
195 | # The nullid in the deltabasenode slot indicates that the revision is a |
|
195 | # The nullid in the deltabasenode slot indicates that the revision is a | |
196 | # fulltext. |
|
196 | # fulltext. | |
197 | revision = self.get(name, node) |
|
197 | revision = self.get(name, node) | |
198 | return [(name, node, None, nullid, revision)] |
|
198 | return [(name, node, None, nullid, revision)] | |
199 |
|
199 | |||
200 | def getmeta(self, name, node): |
|
200 | def getmeta(self, name, node): | |
201 | self._sanitizemetacache() |
|
201 | self._sanitizemetacache() | |
202 | if node != self._threaddata.metacache[0]: |
|
202 | if node != self._threaddata.metacache[0]: | |
203 | data = self._getdata(name, node) |
|
203 | data = self._getdata(name, node) | |
204 | offset, size, flags = shallowutil.parsesizeflags(data) |
|
204 | offset, size, flags = shallowutil.parsesizeflags(data) | |
205 | self._updatemetacache(node, size, flags) |
|
205 | self._updatemetacache(node, size, flags) | |
206 | return self._threaddata.metacache[1] |
|
206 | return self._threaddata.metacache[1] | |
207 |
|
207 | |||
208 | def add(self, name, node, data): |
|
208 | def add(self, name, node, data): | |
209 | raise RuntimeError( |
|
209 | raise RuntimeError( | |
210 | b"cannot add content only to remotefilelog contentstore" |
|
210 | b"cannot add content only to remotefilelog contentstore" | |
211 | ) |
|
211 | ) | |
212 |
|
212 | |||
213 | def _sanitizemetacache(self): |
|
213 | def _sanitizemetacache(self): | |
214 | metacache = getattr(self._threaddata, 'metacache', None) |
|
214 | metacache = getattr(self._threaddata, 'metacache', None) | |
215 | if metacache is None: |
|
215 | if metacache is None: | |
216 | self._threaddata.metacache = (None, None) # (node, meta) |
|
216 | self._threaddata.metacache = (None, None) # (node, meta) | |
217 |
|
217 | |||
218 | def _updatemetacache(self, node, size, flags): |
|
218 | def _updatemetacache(self, node, size, flags): | |
219 | self._sanitizemetacache() |
|
219 | self._sanitizemetacache() | |
220 | if node == self._threaddata.metacache[0]: |
|
220 | if node == self._threaddata.metacache[0]: | |
221 | return |
|
221 | return | |
222 | meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size} |
|
222 | meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size} | |
223 | self._threaddata.metacache = (node, meta) |
|
223 | self._threaddata.metacache = (node, meta) | |
224 |
|
224 | |||
225 |
|
225 | |||
226 | class remotecontentstore(object): |
|
226 | class remotecontentstore(object): | |
227 | def __init__(self, ui, fileservice, shared): |
|
227 | def __init__(self, ui, fileservice, shared): | |
228 | self._fileservice = fileservice |
|
228 | self._fileservice = fileservice | |
229 | # type(shared) is usually remotefilelogcontentstore |
|
229 | # type(shared) is usually remotefilelogcontentstore | |
230 | self._shared = shared |
|
230 | self._shared = shared | |
231 |
|
231 | |||
232 | def get(self, name, node): |
|
232 | def get(self, name, node): | |
233 | self._fileservice.prefetch( |
|
233 | self._fileservice.prefetch( | |
234 | [(name, hex(node))], force=True, fetchdata=True |
|
234 | [(name, hex(node))], force=True, fetchdata=True | |
235 | ) |
|
235 | ) | |
236 | return self._shared.get(name, node) |
|
236 | return self._shared.get(name, node) | |
237 |
|
237 | |||
238 | def getdelta(self, name, node): |
|
238 | def getdelta(self, name, node): | |
239 | revision = self.get(name, node) |
|
239 | revision = self.get(name, node) | |
240 | return revision, name, nullid, self._shared.getmeta(name, node) |
|
240 | return revision, name, nullid, self._shared.getmeta(name, node) | |
241 |
|
241 | |||
242 | def getdeltachain(self, name, node): |
|
242 | def getdeltachain(self, name, node): | |
243 | # Since our remote content stores just contain full texts, we return a |
|
243 | # Since our remote content stores just contain full texts, we return a | |
244 | # fake delta chain that just consists of a single full text revision. |
|
244 | # fake delta chain that just consists of a single full text revision. | |
245 | # The nullid in the deltabasenode slot indicates that the revision is a |
|
245 | # The nullid in the deltabasenode slot indicates that the revision is a | |
246 | # fulltext. |
|
246 | # fulltext. | |
247 | revision = self.get(name, node) |
|
247 | revision = self.get(name, node) | |
248 | return [(name, node, None, nullid, revision)] |
|
248 | return [(name, node, None, nullid, revision)] | |
249 |
|
249 | |||
250 | def getmeta(self, name, node): |
|
250 | def getmeta(self, name, node): | |
251 | self._fileservice.prefetch( |
|
251 | self._fileservice.prefetch( | |
252 | [(name, hex(node))], force=True, fetchdata=True |
|
252 | [(name, hex(node))], force=True, fetchdata=True | |
253 | ) |
|
253 | ) | |
254 | return self._shared.getmeta(name, node) |
|
254 | return self._shared.getmeta(name, node) | |
255 |
|
255 | |||
256 | def add(self, name, node, data): |
|
256 | def add(self, name, node, data): | |
257 | raise RuntimeError(b"cannot add to a remote store") |
|
257 | raise RuntimeError(b"cannot add to a remote store") | |
258 |
|
258 | |||
259 | def getmissing(self, keys): |
|
259 | def getmissing(self, keys): | |
260 | return keys |
|
260 | return keys | |
261 |
|
261 | |||
262 | def markledger(self, ledger, options=None): |
|
262 | def markledger(self, ledger, options=None): | |
263 | pass |
|
263 | pass | |
264 |
|
264 | |||
265 |
|
265 | |||
266 | class manifestrevlogstore(object): |
|
266 | class manifestrevlogstore(object): | |
267 | def __init__(self, repo): |
|
267 | def __init__(self, repo): | |
268 | self._store = repo.store |
|
268 | self._store = repo.store | |
269 | self._svfs = repo.svfs |
|
269 | self._svfs = repo.svfs | |
270 | self._revlogs = dict() |
|
270 | self._revlogs = dict() | |
271 | self._cl = revlog.revlog(self._svfs, b'00changelog.i') |
|
271 | self._cl = revlog.revlog(self._svfs, b'00changelog.i') | |
272 | self._repackstartlinkrev = 0 |
|
272 | self._repackstartlinkrev = 0 | |
273 |
|
273 | |||
274 | def get(self, name, node): |
|
274 | def get(self, name, node): | |
275 | return self._revlog(name).rawdata(node) |
|
275 | return self._revlog(name).rawdata(node) | |
276 |
|
276 | |||
277 | def getdelta(self, name, node): |
|
277 | def getdelta(self, name, node): | |
278 | revision = self.get(name, node) |
|
278 | revision = self.get(name, node) | |
279 | return revision, name, nullid, self.getmeta(name, node) |
|
279 | return revision, name, nullid, self.getmeta(name, node) | |
280 |
|
280 | |||
281 | def getdeltachain(self, name, node): |
|
281 | def getdeltachain(self, name, node): | |
282 | revision = self.get(name, node) |
|
282 | revision = self.get(name, node) | |
283 | return [(name, node, None, nullid, revision)] |
|
283 | return [(name, node, None, nullid, revision)] | |
284 |
|
284 | |||
285 | def getmeta(self, name, node): |
|
285 | def getmeta(self, name, node): | |
286 | rl = self._revlog(name) |
|
286 | rl = self._revlog(name) | |
287 | rev = rl.rev(node) |
|
287 | rev = rl.rev(node) | |
288 | return { |
|
288 | return { | |
289 | constants.METAKEYFLAG: rl.flags(rev), |
|
289 | constants.METAKEYFLAG: rl.flags(rev), | |
290 | constants.METAKEYSIZE: rl.rawsize(rev), |
|
290 | constants.METAKEYSIZE: rl.rawsize(rev), | |
291 | } |
|
291 | } | |
292 |
|
292 | |||
293 | def getancestors(self, name, node, known=None): |
|
293 | def getancestors(self, name, node, known=None): | |
294 | if known is None: |
|
294 | if known is None: | |
295 | known = set() |
|
295 | known = set() | |
296 | if node in known: |
|
296 | if node in known: | |
297 | return [] |
|
297 | return [] | |
298 |
|
298 | |||
299 | rl = self._revlog(name) |
|
299 | rl = self._revlog(name) | |
300 | ancestors = {} |
|
300 | ancestors = {} | |
301 | missing = {node} |
|
301 | missing = {node} | |
302 | for ancrev in rl.ancestors([rl.rev(node)], inclusive=True): |
|
302 | for ancrev in rl.ancestors([rl.rev(node)], inclusive=True): | |
303 | ancnode = rl.node(ancrev) |
|
303 | ancnode = rl.node(ancrev) | |
304 | missing.discard(ancnode) |
|
304 | missing.discard(ancnode) | |
305 |
|
305 | |||
306 | p1, p2 = rl.parents(ancnode) |
|
306 | p1, p2 = rl.parents(ancnode) | |
307 | if p1 != nullid and p1 not in known: |
|
307 | if p1 != nullid and p1 not in known: | |
308 | missing.add(p1) |
|
308 | missing.add(p1) | |
309 | if p2 != nullid and p2 not in known: |
|
309 | if p2 != nullid and p2 not in known: | |
310 | missing.add(p2) |
|
310 | missing.add(p2) | |
311 |
|
311 | |||
312 | linknode = self._cl.node(rl.linkrev(ancrev)) |
|
312 | linknode = self._cl.node(rl.linkrev(ancrev)) | |
313 | ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'') |
|
313 | ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'') | |
314 | if not missing: |
|
314 | if not missing: | |
315 | break |
|
315 | break | |
316 | return ancestors |
|
316 | return ancestors | |
317 |
|
317 | |||
318 | def getnodeinfo(self, name, node): |
|
318 | def getnodeinfo(self, name, node): | |
319 | cl = self._cl |
|
319 | cl = self._cl | |
320 | rl = self._revlog(name) |
|
320 | rl = self._revlog(name) | |
321 | parents = rl.parents(node) |
|
321 | parents = rl.parents(node) | |
322 | linkrev = rl.linkrev(rl.rev(node)) |
|
322 | linkrev = rl.linkrev(rl.rev(node)) | |
323 | return (parents[0], parents[1], cl.node(linkrev), None) |
|
323 | return (parents[0], parents[1], cl.node(linkrev), None) | |
324 |
|
324 | |||
325 | def add(self, *args): |
|
325 | def add(self, *args): | |
326 | raise RuntimeError(b"cannot add to a revlog store") |
|
326 | raise RuntimeError(b"cannot add to a revlog store") | |
327 |
|
327 | |||
328 | def _revlog(self, name): |
|
328 | def _revlog(self, name): | |
329 | rl = self._revlogs.get(name) |
|
329 | rl = self._revlogs.get(name) | |
330 | if rl is None: |
|
330 | if rl is None: | |
331 | revlogname = b'00manifesttree.i' |
|
331 | revlogname = b'00manifesttree.i' | |
332 | if name != b'': |
|
332 | if name != b'': | |
333 | revlogname = b'meta/%s/00manifest.i' % name |
|
333 | revlogname = b'meta/%s/00manifest.i' % name | |
334 | rl = revlog.revlog(self._svfs, revlogname) |
|
334 | rl = revlog.revlog(self._svfs, revlogname) | |
335 | self._revlogs[name] = rl |
|
335 | self._revlogs[name] = rl | |
336 | return rl |
|
336 | return rl | |
337 |
|
337 | |||
338 | def getmissing(self, keys): |
|
338 | def getmissing(self, keys): | |
339 | missing = [] |
|
339 | missing = [] | |
340 | for name, node in keys: |
|
340 | for name, node in keys: | |
341 | mfrevlog = self._revlog(name) |
|
341 | mfrevlog = self._revlog(name) | |
342 | if node not in mfrevlog.nodemap: |
|
342 | if node not in mfrevlog.nodemap: | |
343 | missing.append((name, node)) |
|
343 | missing.append((name, node)) | |
344 |
|
344 | |||
345 | return missing |
|
345 | return missing | |
346 |
|
346 | |||
347 | def setrepacklinkrevrange(self, startrev, endrev): |
|
347 | def setrepacklinkrevrange(self, startrev, endrev): | |
348 | self._repackstartlinkrev = startrev |
|
348 | self._repackstartlinkrev = startrev | |
349 | self._repackendlinkrev = endrev |
|
349 | self._repackendlinkrev = endrev | |
350 |
|
350 | |||
351 | def markledger(self, ledger, options=None): |
|
351 | def markledger(self, ledger, options=None): | |
352 | if options and options.get(constants.OPTION_PACKSONLY): |
|
352 | if options and options.get(constants.OPTION_PACKSONLY): | |
353 | return |
|
353 | return | |
354 | treename = b'' |
|
354 | treename = b'' | |
355 | rl = revlog.revlog(self._svfs, b'00manifesttree.i') |
|
355 | rl = revlog.revlog(self._svfs, b'00manifesttree.i') | |
356 | startlinkrev = self._repackstartlinkrev |
|
356 | startlinkrev = self._repackstartlinkrev | |
357 | endlinkrev = self._repackendlinkrev |
|
357 | endlinkrev = self._repackendlinkrev | |
358 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): |
|
358 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): | |
359 | linkrev = rl.linkrev(rev) |
|
359 | linkrev = rl.linkrev(rev) | |
360 | if linkrev < startlinkrev: |
|
360 | if linkrev < startlinkrev: | |
361 | break |
|
361 | break | |
362 | if linkrev > endlinkrev: |
|
362 | if linkrev > endlinkrev: | |
363 | continue |
|
363 | continue | |
364 | node = rl.node(rev) |
|
364 | node = rl.node(rev) | |
365 | ledger.markdataentry(self, treename, node) |
|
365 | ledger.markdataentry(self, treename, node) | |
366 | ledger.markhistoryentry(self, treename, node) |
|
366 | ledger.markhistoryentry(self, treename, node) | |
367 |
|
367 | |||
368 | for path, encoded, size in self._store.datafiles(): |
|
368 | for t, path, encoded, size in self._store.datafiles(): | |
369 | if path[:5] != b'meta/' or path[-2:] != b'.i': |
|
369 | if path[:5] != b'meta/' or path[-2:] != b'.i': | |
370 | continue |
|
370 | continue | |
371 |
|
371 | |||
372 | treename = path[5 : -len(b'/00manifest.i')] |
|
372 | treename = path[5 : -len(b'/00manifest.i')] | |
373 |
|
373 | |||
374 | rl = revlog.revlog(self._svfs, path) |
|
374 | rl = revlog.revlog(self._svfs, path) | |
375 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): |
|
375 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): | |
376 | linkrev = rl.linkrev(rev) |
|
376 | linkrev = rl.linkrev(rev) | |
377 | if linkrev < startlinkrev: |
|
377 | if linkrev < startlinkrev: | |
378 | break |
|
378 | break | |
379 | if linkrev > endlinkrev: |
|
379 | if linkrev > endlinkrev: | |
380 | continue |
|
380 | continue | |
381 | node = rl.node(rev) |
|
381 | node = rl.node(rev) | |
382 | ledger.markdataentry(self, treename, node) |
|
382 | ledger.markdataentry(self, treename, node) | |
383 | ledger.markhistoryentry(self, treename, node) |
|
383 | ledger.markhistoryentry(self, treename, node) | |
384 |
|
384 | |||
385 | def cleanup(self, ledger): |
|
385 | def cleanup(self, ledger): | |
386 | pass |
|
386 | pass |
@@ -1,439 +1,441 | |||||
1 | # remotefilelogserver.py - server logic for a remotefilelog server |
|
1 | # remotefilelogserver.py - server logic for a remotefilelog server | |
2 | # |
|
2 | # | |
3 | # Copyright 2013 Facebook, Inc. |
|
3 | # Copyright 2013 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import errno |
|
9 | import errno | |
10 | import os |
|
10 | import os | |
11 | import stat |
|
11 | import stat | |
12 | import time |
|
12 | import time | |
13 | import zlib |
|
13 | import zlib | |
14 |
|
14 | |||
15 | from mercurial.i18n import _ |
|
15 | from mercurial.i18n import _ | |
16 | from mercurial.node import bin, hex, nullid |
|
16 | from mercurial.node import bin, hex, nullid | |
17 | from mercurial.pycompat import open |
|
17 | from mercurial.pycompat import open | |
18 | from mercurial import ( |
|
18 | from mercurial import ( | |
19 | changegroup, |
|
19 | changegroup, | |
20 | changelog, |
|
20 | changelog, | |
21 | context, |
|
21 | context, | |
22 | error, |
|
22 | error, | |
23 | extensions, |
|
23 | extensions, | |
24 | match, |
|
24 | match, | |
25 | pycompat, |
|
25 | pycompat, | |
26 | scmutil, |
|
26 | scmutil, | |
27 | store, |
|
27 | store, | |
28 | streamclone, |
|
28 | streamclone, | |
29 | util, |
|
29 | util, | |
30 | wireprotoserver, |
|
30 | wireprotoserver, | |
31 | wireprototypes, |
|
31 | wireprototypes, | |
32 | wireprotov1server, |
|
32 | wireprotov1server, | |
33 | ) |
|
33 | ) | |
34 | from . import ( |
|
34 | from . import ( | |
35 | constants, |
|
35 | constants, | |
36 | shallowutil, |
|
36 | shallowutil, | |
37 | ) |
|
37 | ) | |
38 |
|
38 | |||
39 | _sshv1server = wireprotoserver.sshv1protocolhandler |
|
39 | _sshv1server = wireprotoserver.sshv1protocolhandler | |
40 |
|
40 | |||
41 |
|
41 | |||
42 | def setupserver(ui, repo): |
|
42 | def setupserver(ui, repo): | |
43 | """Sets up a normal Mercurial repo so it can serve files to shallow repos.""" |
|
43 | """Sets up a normal Mercurial repo so it can serve files to shallow repos.""" | |
44 | onetimesetup(ui) |
|
44 | onetimesetup(ui) | |
45 |
|
45 | |||
46 | # don't send files to shallow clients during pulls |
|
46 | # don't send files to shallow clients during pulls | |
47 | def generatefiles( |
|
47 | def generatefiles( | |
48 | orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs |
|
48 | orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs | |
49 | ): |
|
49 | ): | |
50 | caps = self._bundlecaps or [] |
|
50 | caps = self._bundlecaps or [] | |
51 | if constants.BUNDLE2_CAPABLITY in caps: |
|
51 | if constants.BUNDLE2_CAPABLITY in caps: | |
52 | # only send files that don't match the specified patterns |
|
52 | # only send files that don't match the specified patterns | |
53 | includepattern = None |
|
53 | includepattern = None | |
54 | excludepattern = None |
|
54 | excludepattern = None | |
55 | for cap in self._bundlecaps or []: |
|
55 | for cap in self._bundlecaps or []: | |
56 | if cap.startswith(b"includepattern="): |
|
56 | if cap.startswith(b"includepattern="): | |
57 | includepattern = cap[len(b"includepattern=") :].split(b'\0') |
|
57 | includepattern = cap[len(b"includepattern=") :].split(b'\0') | |
58 | elif cap.startswith(b"excludepattern="): |
|
58 | elif cap.startswith(b"excludepattern="): | |
59 | excludepattern = cap[len(b"excludepattern=") :].split(b'\0') |
|
59 | excludepattern = cap[len(b"excludepattern=") :].split(b'\0') | |
60 |
|
60 | |||
61 | m = match.always() |
|
61 | m = match.always() | |
62 | if includepattern or excludepattern: |
|
62 | if includepattern or excludepattern: | |
63 | m = match.match( |
|
63 | m = match.match( | |
64 | repo.root, b'', None, includepattern, excludepattern |
|
64 | repo.root, b'', None, includepattern, excludepattern | |
65 | ) |
|
65 | ) | |
66 |
|
66 | |||
67 | changedfiles = list([f for f in changedfiles if not m(f)]) |
|
67 | changedfiles = list([f for f in changedfiles if not m(f)]) | |
68 | return orig( |
|
68 | return orig( | |
69 | self, changedfiles, linknodes, commonrevs, source, *args, **kwargs |
|
69 | self, changedfiles, linknodes, commonrevs, source, *args, **kwargs | |
70 | ) |
|
70 | ) | |
71 |
|
71 | |||
72 | extensions.wrapfunction( |
|
72 | extensions.wrapfunction( | |
73 | changegroup.cgpacker, b'generatefiles', generatefiles |
|
73 | changegroup.cgpacker, b'generatefiles', generatefiles | |
74 | ) |
|
74 | ) | |
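Note: the two bundle capabilities handled above are plain prefixed strings whose value is a NUL-separated pattern list. A hedged sketch of the parsing (the capability string itself is invented for illustration):

    # illustration only, mirroring the startswith/split logic above
    cap = b"includepattern=foo/**\0bar/baz/**"
    if cap.startswith(b"includepattern="):
        includepattern = cap[len(b"includepattern="):].split(b'\0')
    # includepattern is now [b'foo/**', b'bar/baz/**']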
75 |
|
75 | |||
76 |
|
76 | |||
77 | onetime = False |
|
77 | onetime = False | |
78 |
|
78 | |||
79 |
|
79 | |||
80 | def onetimesetup(ui): |
|
80 | def onetimesetup(ui): | |
81 | """Configures the wireprotocol for both clients and servers.""" |
|
81 | """Configures the wireprotocol for both clients and servers.""" | |
82 | global onetime |
|
82 | global onetime | |
83 | if onetime: |
|
83 | if onetime: | |
84 | return |
|
84 | return | |
85 | onetime = True |
|
85 | onetime = True | |
86 |
|
86 | |||
87 | # support file content requests |
|
87 | # support file content requests | |
88 | wireprotov1server.wireprotocommand( |
|
88 | wireprotov1server.wireprotocommand( | |
89 | b'x_rfl_getflogheads', b'path', permission=b'pull' |
|
89 | b'x_rfl_getflogheads', b'path', permission=b'pull' | |
90 | )(getflogheads) |
|
90 | )(getflogheads) | |
91 | wireprotov1server.wireprotocommand( |
|
91 | wireprotov1server.wireprotocommand( | |
92 | b'x_rfl_getfiles', b'', permission=b'pull' |
|
92 | b'x_rfl_getfiles', b'', permission=b'pull' | |
93 | )(getfiles) |
|
93 | )(getfiles) | |
94 | wireprotov1server.wireprotocommand( |
|
94 | wireprotov1server.wireprotocommand( | |
95 | b'x_rfl_getfile', b'file node', permission=b'pull' |
|
95 | b'x_rfl_getfile', b'file node', permission=b'pull' | |
96 | )(getfile) |
|
96 | )(getfile) | |
97 |
|
97 | |||
98 | class streamstate(object): |
|
98 | class streamstate(object): | |
99 | match = None |
|
99 | match = None | |
100 | shallowremote = False |
|
100 | shallowremote = False | |
101 | noflatmf = False |
|
101 | noflatmf = False | |
102 |
|
102 | |||
103 | state = streamstate() |
|
103 | state = streamstate() | |
104 |
|
104 | |||
105 | def stream_out_shallow(repo, proto, other): |
|
105 | def stream_out_shallow(repo, proto, other): | |
106 | includepattern = None |
|
106 | includepattern = None | |
107 | excludepattern = None |
|
107 | excludepattern = None | |
108 | raw = other.get(b'includepattern') |
|
108 | raw = other.get(b'includepattern') | |
109 | if raw: |
|
109 | if raw: | |
110 | includepattern = raw.split(b'\0') |
|
110 | includepattern = raw.split(b'\0') | |
111 | raw = other.get(b'excludepattern') |
|
111 | raw = other.get(b'excludepattern') | |
112 | if raw: |
|
112 | if raw: | |
113 | excludepattern = raw.split(b'\0') |
|
113 | excludepattern = raw.split(b'\0') | |
114 |
|
114 | |||
115 | oldshallow = state.shallowremote |
|
115 | oldshallow = state.shallowremote | |
116 | oldmatch = state.match |
|
116 | oldmatch = state.match | |
117 | oldnoflatmf = state.noflatmf |
|
117 | oldnoflatmf = state.noflatmf | |
118 | try: |
|
118 | try: | |
119 | state.shallowremote = True |
|
119 | state.shallowremote = True | |
120 | state.match = match.always() |
|
120 | state.match = match.always() | |
121 | state.noflatmf = other.get(b'noflatmanifest') == b'True' |
|
121 | state.noflatmf = other.get(b'noflatmanifest') == b'True' | |
122 | if includepattern or excludepattern: |
|
122 | if includepattern or excludepattern: | |
123 | state.match = match.match( |
|
123 | state.match = match.match( | |
124 | repo.root, b'', None, includepattern, excludepattern |
|
124 | repo.root, b'', None, includepattern, excludepattern | |
125 | ) |
|
125 | ) | |
126 | streamres = wireprotov1server.stream(repo, proto) |
|
126 | streamres = wireprotov1server.stream(repo, proto) | |
127 |
|
127 | |||
128 | # Force the first value to execute, so the file list is computed |
|
128 | # Force the first value to execute, so the file list is computed | |
129 | # within the try/finally scope |
|
129 | # within the try/finally scope | |
130 | first = next(streamres.gen) |
|
130 | first = next(streamres.gen) | |
131 | second = next(streamres.gen) |
|
131 | second = next(streamres.gen) | |
132 |
|
132 | |||
133 | def gen(): |
|
133 | def gen(): | |
134 | yield first |
|
134 | yield first | |
135 | yield second |
|
135 | yield second | |
136 | for value in streamres.gen: |
|
136 | for value in streamres.gen: | |
137 | yield value |
|
137 | yield value | |
138 |
|
138 | |||
139 | return wireprototypes.streamres(gen()) |
|
139 | return wireprototypes.streamres(gen()) | |
140 | finally: |
|
140 | finally: | |
141 | state.shallowremote = oldshallow |
|
141 | state.shallowremote = oldshallow | |
142 | state.match = oldmatch |
|
142 | state.match = oldmatch | |
143 | state.noflatmf = oldnoflatmf |
|
143 | state.noflatmf = oldnoflatmf | |
144 |
|
144 | |||
145 | wireprotov1server.commands[b'stream_out_shallow'] = ( |
|
145 | wireprotov1server.commands[b'stream_out_shallow'] = ( | |
146 | stream_out_shallow, |
|
146 | stream_out_shallow, | |
147 | b'*', |
|
147 | b'*', | |
148 | ) |
|
148 | ) | |
149 |
|
149 | |||
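Note on the try/finally above: streamres.gen is lazy, so the first two values are pulled eagerly to make sure the file list is computed while state.shallowremote and state.match are still overridden. A minimal standalone sketch of the same pitfall (illustrative names only, not Mercurial code):

    import contextlib

    state = {'shallow': False}

    def make_stream():
        # the generator body only runs when a value is requested
        yield 'file list computed with shallow=%s' % state['shallow']
        yield 'second chunk'

    @contextlib.contextmanager
    def shallow_mode():
        old = state['shallow']
        state['shallow'] = True
        try:
            yield
        finally:
            state['shallow'] = old

    with shallow_mode():
        lazy = make_stream()      # never forced inside the with-block
        forced = make_stream()
        first = next(forced)      # forced while shallow=True

    print(next(lazy))   # ran too late: reports shallow=False again
    print(first)        # 'file list computed with shallow=True'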
150 | # don't clone filelogs to shallow clients |
|
150 | # don't clone filelogs to shallow clients | |
151 | def _walkstreamfiles(orig, repo, matcher=None): |
|
151 | def _walkstreamfiles(orig, repo, matcher=None): | |
152 | if state.shallowremote: |
|
152 | if state.shallowremote: | |
153 | # if we are shallow ourselves, stream our local commits |
|
153 | # if we are shallow ourselves, stream our local commits | |
154 | if shallowutil.isenabled(repo): |
|
154 | if shallowutil.isenabled(repo): | |
155 | striplen = len(repo.store.path) + 1 |
|
155 | striplen = len(repo.store.path) + 1 | |
156 | readdir = repo.store.rawvfs.readdir |
|
156 | readdir = repo.store.rawvfs.readdir | |
157 | visit = [os.path.join(repo.store.path, b'data')] |
|
157 | visit = [os.path.join(repo.store.path, b'data')] | |
158 | while visit: |
|
158 | while visit: | |
159 | p = visit.pop() |
|
159 | p = visit.pop() | |
160 | for f, kind, st in readdir(p, stat=True): |
|
160 | for f, kind, st in readdir(p, stat=True): | |
161 | fp = p + b'/' + f |
|
161 | fp = p + b'/' + f | |
162 | if kind == stat.S_IFREG: |
|
162 | if kind == stat.S_IFREG: | |
163 | if not fp.endswith(b'.i') and not fp.endswith( |
|
163 | if not fp.endswith(b'.i') and not fp.endswith( | |
164 | b'.d' |
|
164 | b'.d' | |
165 | ): |
|
165 | ): | |
166 | n = util.pconvert(fp[striplen:]) |
|
166 | n = util.pconvert(fp[striplen:]) | |
167 |
|
|
167 | d = store.decodedir(n) | |
|
168 | t = store.FILETYPE_OTHER | |||
|
169 | yield (t, d, n, st.st_size) | |||
168 | if kind == stat.S_IFDIR: |
|
170 | if kind == stat.S_IFDIR: | |
169 | visit.append(fp) |
|
171 | visit.append(fp) | |
170 |
|
172 | |||
171 | if scmutil.istreemanifest(repo): |
|
173 | if scmutil.istreemanifest(repo): | |
172 | for (u, e, s) in repo.store.datafiles(): |
|
174 | for (t, u, e, s) in repo.store.datafiles(): | |
173 | if u.startswith(b'meta/') and ( |
|
175 | if u.startswith(b'meta/') and ( | |
174 | u.endswith(b'.i') or u.endswith(b'.d') |
|
176 | u.endswith(b'.i') or u.endswith(b'.d') | |
175 | ): |
|
177 | ): | |
176 | yield (u, e, s) |
|
178 | yield (t, u, e, s) | |
177 |
|
179 | |||
178 | # Return .d and .i files that do not match the shallow pattern |
|
180 | # Return .d and .i files that do not match the shallow pattern | |
179 | match = state.match |
|
181 | match = state.match | |
180 | if match and not match.always(): |
|
182 | if match and not match.always(): | |
181 | for (u, e, s) in repo.store.datafiles(): |
|
183 | for (t, u, e, s) in repo.store.datafiles(): | |
182 | f = u[5:-2] # trim data/... and .i/.d |
|
184 | f = u[5:-2] # trim data/... and .i/.d | |
183 | if not state.match(f): |
|
185 | if not state.match(f): | |
184 | yield (u, e, s) |
|
186 | yield (t, u, e, s) | |
185 |
|
187 | |||
186 | for x in repo.store.topfiles(): |
|
188 | for x in repo.store.topfiles(): | |
187 | if state.noflatmf and x[0][:11] == b'00manifest.': |
|
189 | if state.noflatmf and x[0][:11] == b'00manifest.': | |
188 | continue |
|
190 | continue | |
189 | yield x |
|
191 | yield x | |
190 |
|
192 | |||
191 | elif shallowutil.isenabled(repo): |
|
193 | elif shallowutil.isenabled(repo): | |
192 | # don't allow cloning from a shallow repo to a full repo |
|
194 | # don't allow cloning from a shallow repo to a full repo | |
193 | # since it would require fetching every version of every |
|
195 | # since it would require fetching every version of every | |
194 | # file in order to create the revlogs. |
|
196 | # file in order to create the revlogs. | |
195 | raise error.Abort( |
|
197 | raise error.Abort( | |
196 | _(b"Cannot clone from a shallow repo to a full repo.") |
|
198 | _(b"Cannot clone from a shallow repo to a full repo.") | |
197 | ) |
|
199 | ) | |
198 | else: |
|
200 | else: | |
199 | for x in orig(repo, matcher): |
|
201 | for x in orig(repo, matcher): | |
200 | yield x |
|
202 | yield x | |
201 |
|
203 | |||
202 | extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles) |
|
204 | extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles) | |
203 |
|
205 | |||
204 | # expose remotefilelog capabilities |
|
206 | # expose remotefilelog capabilities | |
205 | def _capabilities(orig, repo, proto): |
|
207 | def _capabilities(orig, repo, proto): | |
206 | caps = orig(repo, proto) |
|
208 | caps = orig(repo, proto) | |
207 | if shallowutil.isenabled(repo) or ui.configbool( |
|
209 | if shallowutil.isenabled(repo) or ui.configbool( | |
208 | b'remotefilelog', b'server' |
|
210 | b'remotefilelog', b'server' | |
209 | ): |
|
211 | ): | |
210 | if isinstance(proto, _sshv1server): |
|
212 | if isinstance(proto, _sshv1server): | |
211 | # legacy getfiles method which only works over ssh |
|
213 | # legacy getfiles method which only works over ssh | |
212 | caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES) |
|
214 | caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES) | |
213 | caps.append(b'x_rfl_getflogheads') |
|
215 | caps.append(b'x_rfl_getflogheads') | |
214 | caps.append(b'x_rfl_getfile') |
|
216 | caps.append(b'x_rfl_getfile') | |
215 | return caps |
|
217 | return caps | |
216 |
|
218 | |||
217 | extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities) |
|
219 | extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities) | |
218 |
|
220 | |||
219 | def _adjustlinkrev(orig, self, *args, **kwargs): |
|
221 | def _adjustlinkrev(orig, self, *args, **kwargs): | |
220 | # When generating file blobs, taking the real path is too slow on large |
|
222 | # When generating file blobs, taking the real path is too slow on large | |
221 | # repos, so force it to just return the linkrev directly. |
|
223 | # repos, so force it to just return the linkrev directly. | |
222 | repo = self._repo |
|
224 | repo = self._repo | |
223 | if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev: |
|
225 | if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev: | |
224 | return self._filelog.linkrev(self._filelog.rev(self._filenode)) |
|
226 | return self._filelog.linkrev(self._filelog.rev(self._filenode)) | |
225 | return orig(self, *args, **kwargs) |
|
227 | return orig(self, *args, **kwargs) | |
226 |
|
228 | |||
227 | extensions.wrapfunction( |
|
229 | extensions.wrapfunction( | |
228 | context.basefilectx, b'_adjustlinkrev', _adjustlinkrev |
|
230 | context.basefilectx, b'_adjustlinkrev', _adjustlinkrev | |
229 | ) |
|
231 | ) | |
230 |
|
232 | |||
231 | def _iscmd(orig, cmd): |
|
233 | def _iscmd(orig, cmd): | |
232 | if cmd == b'x_rfl_getfiles': |
|
234 | if cmd == b'x_rfl_getfiles': | |
233 | return False |
|
235 | return False | |
234 | return orig(cmd) |
|
236 | return orig(cmd) | |
235 |
|
237 | |||
236 | extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd) |
|
238 | extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd) | |
237 |
|
239 | |||
238 |
|
240 | |||
239 | def _loadfileblob(repo, cachepath, path, node): |
|
241 | def _loadfileblob(repo, cachepath, path, node): | |
240 | filecachepath = os.path.join(cachepath, path, hex(node)) |
|
242 | filecachepath = os.path.join(cachepath, path, hex(node)) | |
241 | if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0: |
|
243 | if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0: | |
242 | filectx = repo.filectx(path, fileid=node) |
|
244 | filectx = repo.filectx(path, fileid=node) | |
243 | if filectx.node() == nullid: |
|
245 | if filectx.node() == nullid: | |
244 | repo.changelog = changelog.changelog(repo.svfs) |
|
246 | repo.changelog = changelog.changelog(repo.svfs) | |
245 | filectx = repo.filectx(path, fileid=node) |
|
247 | filectx = repo.filectx(path, fileid=node) | |
246 |
|
248 | |||
247 | text = createfileblob(filectx) |
|
249 | text = createfileblob(filectx) | |
248 | # TODO configurable compression engines |
|
250 | # TODO configurable compression engines | |
249 | text = zlib.compress(text) |
|
251 | text = zlib.compress(text) | |
250 |
|
252 | |||
251 | # everything should be user & group read/writable |
|
253 | # everything should be user & group read/writable | |
252 | oldumask = os.umask(0o002) |
|
254 | oldumask = os.umask(0o002) | |
253 | try: |
|
255 | try: | |
254 | dirname = os.path.dirname(filecachepath) |
|
256 | dirname = os.path.dirname(filecachepath) | |
255 | if not os.path.exists(dirname): |
|
257 | if not os.path.exists(dirname): | |
256 | try: |
|
258 | try: | |
257 | os.makedirs(dirname) |
|
259 | os.makedirs(dirname) | |
258 | except OSError as ex: |
|
260 | except OSError as ex: | |
259 | if ex.errno != errno.EEXIST: |
|
261 | if ex.errno != errno.EEXIST: | |
260 | raise |
|
262 | raise | |
261 |
|
263 | |||
262 | f = None |
|
264 | f = None | |
263 | try: |
|
265 | try: | |
264 | f = util.atomictempfile(filecachepath, b"wb") |
|
266 | f = util.atomictempfile(filecachepath, b"wb") | |
265 | f.write(text) |
|
267 | f.write(text) | |
266 | except (IOError, OSError): |
|
268 | except (IOError, OSError): | |
267 | # Don't abort if the user only has permission to read, |
|
269 | # Don't abort if the user only has permission to read, | |
268 | # and not write. |
|
270 | # and not write. | |
269 | pass |
|
271 | pass | |
270 | finally: |
|
272 | finally: | |
271 | if f: |
|
273 | if f: | |
272 | f.close() |
|
274 | f.close() | |
273 | finally: |
|
275 | finally: | |
274 | os.umask(oldumask) |
|
276 | os.umask(oldumask) | |
275 | else: |
|
277 | else: | |
276 | with open(filecachepath, b"rb") as f: |
|
278 | with open(filecachepath, b"rb") as f: | |
277 | text = f.read() |
|
279 | text = f.read() | |
278 | return text |
|
280 | return text | |
279 |
|
281 | |||
280 |
|
282 | |||
281 | def getflogheads(repo, proto, path): |
|
283 | def getflogheads(repo, proto, path): | |
282 | """A server api for requesting a filelog's heads""" |
|
284 | """A server api for requesting a filelog's heads""" | |
283 | flog = repo.file(path) |
|
285 | flog = repo.file(path) | |
284 | heads = flog.heads() |
|
286 | heads = flog.heads() | |
285 | return b'\n'.join((hex(head) for head in heads if head != nullid)) |
|
287 | return b'\n'.join((hex(head) for head in heads if head != nullid)) | |
286 |
|
288 | |||
287 |
|
289 | |||
288 | def getfile(repo, proto, file, node): |
|
290 | def getfile(repo, proto, file, node): | |
289 | """A server api for requesting a particular version of a file. Can be used |
|
291 | """A server api for requesting a particular version of a file. Can be used | |
290 | in batches to request many files at once. The return protocol is: |
|
292 | in batches to request many files at once. The return protocol is: | |
291 | <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or |
|
293 | <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or | |
292 | non-zero for an error. |
|
294 | non-zero for an error. | |
293 |
|
295 | |||
294 | data is a compressed blob with revlog flag and ancestors information. See |
|
296 | data is a compressed blob with revlog flag and ancestors information. See | |
295 | createfileblob for its content. |
|
297 | createfileblob for its content. | |
296 | """ |
|
298 | """ | |
297 | if shallowutil.isenabled(repo): |
|
299 | if shallowutil.isenabled(repo): | |
298 | return b'1\0' + _(b'cannot fetch remote files from shallow repo') |
|
300 | return b'1\0' + _(b'cannot fetch remote files from shallow repo') | |
299 | cachepath = repo.ui.config(b"remotefilelog", b"servercachepath") |
|
301 | cachepath = repo.ui.config(b"remotefilelog", b"servercachepath") | |
300 | if not cachepath: |
|
302 | if not cachepath: | |
301 | cachepath = os.path.join(repo.path, b"remotefilelogcache") |
|
303 | cachepath = os.path.join(repo.path, b"remotefilelogcache") | |
302 | node = bin(node.strip()) |
|
304 | node = bin(node.strip()) | |
303 | if node == nullid: |
|
305 | if node == nullid: | |
304 | return b'0\0' |
|
306 | return b'0\0' | |
305 | return b'0\0' + _loadfileblob(repo, cachepath, file, node) |
|
307 | return b'0\0' + _loadfileblob(repo, cachepath, file, node) | |
306 |
|
308 | |||
307 |
|
309 | |||
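A hedged sketch of how a caller could unpack the x_rfl_getfile reply described in the docstring above. The helper name and error handling are illustrative; only the <errorcode>\0<payload> layout and the zlib compression applied in _loadfileblob are taken from the code:

    import zlib

    def parse_getfile_reply(reply):
        # layout per the docstring: <errorcode>\0<data/errormsg>
        code, _, payload = reply.partition(b'\0')
        if code != b'0':
            # non-zero code: the payload is an error message
            raise RuntimeError(payload.decode('utf-8', 'replace'))
        if not payload:
            # the server answers b'0\0' for the null node
            return b''
        # on success the payload is the zlib-compressed file blob
        # built by createfileblob and cached by _loadfileblob
        return zlib.decompress(payload)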
308 | def getfiles(repo, proto): |
|
310 | def getfiles(repo, proto): | |
309 | """A server api for requesting particular versions of particular files.""" |
|
311 | """A server api for requesting particular versions of particular files.""" | |
310 | if shallowutil.isenabled(repo): |
|
312 | if shallowutil.isenabled(repo): | |
311 | raise error.Abort(_(b'cannot fetch remote files from shallow repo')) |
|
313 | raise error.Abort(_(b'cannot fetch remote files from shallow repo')) | |
312 | if not isinstance(proto, _sshv1server): |
|
314 | if not isinstance(proto, _sshv1server): | |
313 | raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol')) |
|
315 | raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol')) | |
314 |
|
316 | |||
315 | def streamer(): |
|
317 | def streamer(): | |
316 | fin = proto._fin |
|
318 | fin = proto._fin | |
317 |
|
319 | |||
318 | cachepath = repo.ui.config(b"remotefilelog", b"servercachepath") |
|
320 | cachepath = repo.ui.config(b"remotefilelog", b"servercachepath") | |
319 | if not cachepath: |
|
321 | if not cachepath: | |
320 | cachepath = os.path.join(repo.path, b"remotefilelogcache") |
|
322 | cachepath = os.path.join(repo.path, b"remotefilelogcache") | |
321 |
|
323 | |||
322 | while True: |
|
324 | while True: | |
323 | request = fin.readline()[:-1] |
|
325 | request = fin.readline()[:-1] | |
324 | if not request: |
|
326 | if not request: | |
325 | break |
|
327 | break | |
326 |
|
328 | |||
327 | node = bin(request[:40]) |
|
329 | node = bin(request[:40]) | |
328 | if node == nullid: |
|
330 | if node == nullid: | |
329 | yield b'0\n' |
|
331 | yield b'0\n' | |
330 | continue |
|
332 | continue | |
331 |
|
333 | |||
332 | path = request[40:] |
|
334 | path = request[40:] | |
333 |
|
335 | |||
334 | text = _loadfileblob(repo, cachepath, path, node) |
|
336 | text = _loadfileblob(repo, cachepath, path, node) | |
335 |
|
337 | |||
336 | yield b'%d\n%s' % (len(text), text) |
|
338 | yield b'%d\n%s' % (len(text), text) | |
337 |
|
339 | |||
338 | # it would be better to only flush after processing a whole batch |
|
340 | # it would be better to only flush after processing a whole batch | |
339 | # but currently we don't know if there are more requests coming |
|
341 | # but currently we don't know if there are more requests coming | |
340 | proto._fout.flush() |
|
342 | proto._fout.flush() | |
341 |
|
343 | |||
342 | return wireprototypes.streamres(streamer()) |
|
344 | return wireprototypes.streamres(streamer()) | |
343 |
|
345 | |||
344 |
|
346 | |||
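The streamer above implies a simple line-oriented batch protocol: each request line is a 40-character hex node followed by the path, an empty line ends the batch, and every answer is a decimal length prefix followed by a zlib-compressed blob (b'0\n' for the null node). A hedged client-side sketch, with fout/fin standing in for the peer's streams (illustrative names, not the real client implementation):

    import zlib

    def request_files(fout, fin, wants):
        # wants: iterable of (hexnode, path) byte-string pairs
        for hexnode, path in wants:
            fout.write(hexnode + path + b'\n')
        fout.write(b'\n')          # empty request line terminates the batch
        fout.flush()

        blobs = []
        for _ in wants:
            size = int(fin.readline()[:-1])   # the b'%d\n' length prefix
            if size == 0:
                blobs.append(None)            # null node answered with b'0\n'
                continue
            blobs.append(zlib.decompress(fin.read(size)))
        return blobs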
345 | def createfileblob(filectx): |
|
347 | def createfileblob(filectx): | |
346 | """ |
|
348 | """ | |
347 | format: |
|
349 | format: | |
348 | v0: |
|
350 | v0: | |
349 | str(len(rawtext)) + '\0' + rawtext + ancestortext |
|
351 | str(len(rawtext)) + '\0' + rawtext + ancestortext | |
350 | v1: |
|
352 | v1: | |
351 | 'v1' + '\n' + metalist + '\0' + rawtext + ancestortext |
|
353 | 'v1' + '\n' + metalist + '\0' + rawtext + ancestortext | |
352 | metalist := metalist + '\n' + meta | meta |
|
354 | metalist := metalist + '\n' + meta | meta | |
353 | meta := sizemeta | flagmeta |
|
355 | meta := sizemeta | flagmeta | |
354 | sizemeta := METAKEYSIZE + str(len(rawtext)) |
|
356 | sizemeta := METAKEYSIZE + str(len(rawtext)) | |
355 | flagmeta := METAKEYFLAG + str(flag) |
|
357 | flagmeta := METAKEYFLAG + str(flag) | |
356 |
|
358 | |||
357 | note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a |
|
359 | note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a | |
358 | length of 1. |
|
360 | length of 1. | |
359 | """ |
|
361 | """ | |
360 | flog = filectx.filelog() |
|
362 | flog = filectx.filelog() | |
361 | frev = filectx.filerev() |
|
363 | frev = filectx.filerev() | |
362 | revlogflags = flog._revlog.flags(frev) |
|
364 | revlogflags = flog._revlog.flags(frev) | |
363 | if revlogflags == 0: |
|
365 | if revlogflags == 0: | |
364 | # normal files |
|
366 | # normal files | |
365 | text = filectx.data() |
|
367 | text = filectx.data() | |
366 | else: |
|
368 | else: | |
367 | # lfs, read raw revision data |
|
369 | # lfs, read raw revision data | |
368 | text = flog.rawdata(frev) |
|
370 | text = flog.rawdata(frev) | |
369 |
|
371 | |||
370 | repo = filectx._repo |
|
372 | repo = filectx._repo | |
371 |
|
373 | |||
372 | ancestors = [filectx] |
|
374 | ancestors = [filectx] | |
373 |
|
375 | |||
374 | try: |
|
376 | try: | |
375 | repo.forcelinkrev = True |
|
377 | repo.forcelinkrev = True | |
376 | ancestors.extend([f for f in filectx.ancestors()]) |
|
378 | ancestors.extend([f for f in filectx.ancestors()]) | |
377 |
|
379 | |||
378 | ancestortext = b"" |
|
380 | ancestortext = b"" | |
379 | for ancestorctx in ancestors: |
|
381 | for ancestorctx in ancestors: | |
380 | parents = ancestorctx.parents() |
|
382 | parents = ancestorctx.parents() | |
381 | p1 = nullid |
|
383 | p1 = nullid | |
382 | p2 = nullid |
|
384 | p2 = nullid | |
383 | if len(parents) > 0: |
|
385 | if len(parents) > 0: | |
384 | p1 = parents[0].filenode() |
|
386 | p1 = parents[0].filenode() | |
385 | if len(parents) > 1: |
|
387 | if len(parents) > 1: | |
386 | p2 = parents[1].filenode() |
|
388 | p2 = parents[1].filenode() | |
387 |
|
389 | |||
388 | copyname = b"" |
|
390 | copyname = b"" | |
389 | rename = ancestorctx.renamed() |
|
391 | rename = ancestorctx.renamed() | |
390 | if rename: |
|
392 | if rename: | |
391 | copyname = rename[0] |
|
393 | copyname = rename[0] | |
392 | linknode = ancestorctx.node() |
|
394 | linknode = ancestorctx.node() | |
393 | ancestortext += b"%s%s%s%s%s\0" % ( |
|
395 | ancestortext += b"%s%s%s%s%s\0" % ( | |
394 | ancestorctx.filenode(), |
|
396 | ancestorctx.filenode(), | |
395 | p1, |
|
397 | p1, | |
396 | p2, |
|
398 | p2, | |
397 | linknode, |
|
399 | linknode, | |
398 | copyname, |
|
400 | copyname, | |
399 | ) |
|
401 | ) | |
400 | finally: |
|
402 | finally: | |
401 | repo.forcelinkrev = False |
|
403 | repo.forcelinkrev = False | |
402 |
|
404 | |||
403 | header = shallowutil.buildfileblobheader(len(text), revlogflags) |
|
405 | header = shallowutil.buildfileblobheader(len(text), revlogflags) | |
404 |
|
406 | |||
405 | return b"%s\0%s%s" % (header, text, ancestortext) |
|
407 | return b"%s\0%s%s" % (header, text, ancestortext) | |
406 |
|
408 | |||
407 |
|
409 | |||
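A hedged sketch of walking the ancestortext appended by createfileblob above; it assumes only what the loop shows (four 20-byte binary nodes per entry, an optional copy name, and a b'\0' terminator). Names are illustrative:

    def iterancestors(ancestortext):
        # each entry is filenode + p1 + p2 + linknode + copyname + b'\0',
        # where the four nodes are 20-byte binary hashes and copyname may be empty
        offset = 0
        while offset < len(ancestortext):
            filenode = ancestortext[offset:offset + 20]
            p1 = ancestortext[offset + 20:offset + 40]
            p2 = ancestortext[offset + 40:offset + 60]
            linknode = ancestortext[offset + 60:offset + 80]
            end = ancestortext.index(b'\0', offset + 80)
            copyname = ancestortext[offset + 80:end]
            yield filenode, p1, p2, linknode, copyname
            offset = end + 1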
408 | def gcserver(ui, repo): |
|
410 | def gcserver(ui, repo): | |
409 | if not repo.ui.configbool(b"remotefilelog", b"server"): |
|
411 | if not repo.ui.configbool(b"remotefilelog", b"server"): | |
410 | return |
|
412 | return | |
411 |
|
413 | |||
412 | neededfiles = set() |
|
414 | neededfiles = set() | |
413 | heads = repo.revs(b"heads(tip~25000:) - null") |
|
415 | heads = repo.revs(b"heads(tip~25000:) - null") | |
414 |
|
416 | |||
415 | cachepath = repo.vfs.join(b"remotefilelogcache") |
|
417 | cachepath = repo.vfs.join(b"remotefilelogcache") | |
416 | for head in heads: |
|
418 | for head in heads: | |
417 | mf = repo[head].manifest() |
|
419 | mf = repo[head].manifest() | |
418 | for filename, filenode in pycompat.iteritems(mf): |
|
420 | for filename, filenode in pycompat.iteritems(mf): | |
419 | filecachepath = os.path.join(cachepath, filename, hex(filenode)) |
|
421 | filecachepath = os.path.join(cachepath, filename, hex(filenode)) | |
420 | neededfiles.add(filecachepath) |
|
422 | neededfiles.add(filecachepath) | |
421 |
|
423 | |||
422 | # delete unneeded older files |
|
424 | # delete unneeded older files | |
423 | days = repo.ui.configint(b"remotefilelog", b"serverexpiration") |
|
425 | days = repo.ui.configint(b"remotefilelog", b"serverexpiration") | |
424 | expiration = time.time() - (days * 24 * 60 * 60) |
|
426 | expiration = time.time() - (days * 24 * 60 * 60) | |
425 |
|
427 | |||
426 | progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files") |
|
428 | progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files") | |
427 | progress.update(0) |
|
429 | progress.update(0) | |
428 | for root, dirs, files in os.walk(cachepath): |
|
430 | for root, dirs, files in os.walk(cachepath): | |
429 | for file in files: |
|
431 | for file in files: | |
430 | filepath = os.path.join(root, file) |
|
432 | filepath = os.path.join(root, file) | |
431 | progress.increment() |
|
433 | progress.increment() | |
432 | if filepath in neededfiles: |
|
434 | if filepath in neededfiles: | |
433 | continue |
|
435 | continue | |
434 |
|
436 | |||
435 | stat = os.stat(filepath) |
|
437 | stat = os.stat(filepath) | |
436 | if stat.st_mtime < expiration: |
|
438 | if stat.st_mtime < expiration: | |
437 | os.remove(filepath) |
|
439 | os.remove(filepath) | |
438 |
|
440 | |||
439 | progress.complete() |
|
441 | progress.complete() |
@@ -1,546 +1,546 | |||||
1 | # repair.py - functions for repository repair for mercurial |
|
1 | # repair.py - functions for repository repair for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> | |
4 | # Copyright 2007 Olivia Mackall |
|
4 | # Copyright 2007 Olivia Mackall | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | from __future__ import absolute_import |
|
9 | from __future__ import absolute_import | |
10 |
|
10 | |||
11 | import errno |
|
11 | import errno | |
12 |
|
12 | |||
13 | from .i18n import _ |
|
13 | from .i18n import _ | |
14 | from .node import ( |
|
14 | from .node import ( | |
15 | hex, |
|
15 | hex, | |
16 | short, |
|
16 | short, | |
17 | ) |
|
17 | ) | |
18 | from . import ( |
|
18 | from . import ( | |
19 | bundle2, |
|
19 | bundle2, | |
20 | changegroup, |
|
20 | changegroup, | |
21 | discovery, |
|
21 | discovery, | |
22 | error, |
|
22 | error, | |
23 | exchange, |
|
23 | exchange, | |
24 | obsolete, |
|
24 | obsolete, | |
25 | obsutil, |
|
25 | obsutil, | |
26 | pathutil, |
|
26 | pathutil, | |
27 | phases, |
|
27 | phases, | |
28 | pycompat, |
|
28 | pycompat, | |
29 | requirements, |
|
29 | requirements, | |
30 | scmutil, |
|
30 | scmutil, | |
31 | util, |
|
31 | util, | |
32 | ) |
|
32 | ) | |
33 | from .utils import ( |
|
33 | from .utils import ( | |
34 | hashutil, |
|
34 | hashutil, | |
35 | stringutil, |
|
35 | stringutil, | |
36 | ) |
|
36 | ) | |
37 |
|
37 | |||
38 |
|
38 | |||
39 | def backupbundle( |
|
39 | def backupbundle( | |
40 | repo, bases, heads, node, suffix, compress=True, obsolescence=True |
|
40 | repo, bases, heads, node, suffix, compress=True, obsolescence=True | |
41 | ): |
|
41 | ): | |
42 | """create a bundle with the specified revisions as a backup""" |
|
42 | """create a bundle with the specified revisions as a backup""" | |
43 |
|
43 | |||
44 | backupdir = b"strip-backup" |
|
44 | backupdir = b"strip-backup" | |
45 | vfs = repo.vfs |
|
45 | vfs = repo.vfs | |
46 | if not vfs.isdir(backupdir): |
|
46 | if not vfs.isdir(backupdir): | |
47 | vfs.mkdir(backupdir) |
|
47 | vfs.mkdir(backupdir) | |
48 |
|
48 | |||
49 | # Include a hash of all the nodes in the filename for uniqueness |
|
49 | # Include a hash of all the nodes in the filename for uniqueness | |
50 | allcommits = repo.set(b'%ln::%ln', bases, heads) |
|
50 | allcommits = repo.set(b'%ln::%ln', bases, heads) | |
51 | allhashes = sorted(c.hex() for c in allcommits) |
|
51 | allhashes = sorted(c.hex() for c in allcommits) | |
52 | totalhash = hashutil.sha1(b''.join(allhashes)).digest() |
|
52 | totalhash = hashutil.sha1(b''.join(allhashes)).digest() | |
53 | name = b"%s/%s-%s-%s.hg" % ( |
|
53 | name = b"%s/%s-%s-%s.hg" % ( | |
54 | backupdir, |
|
54 | backupdir, | |
55 | short(node), |
|
55 | short(node), | |
56 | hex(totalhash[:4]), |
|
56 | hex(totalhash[:4]), | |
57 | suffix, |
|
57 | suffix, | |
58 | ) |
|
58 | ) | |
59 |
|
59 | |||
60 | cgversion = changegroup.localversion(repo) |
|
60 | cgversion = changegroup.localversion(repo) | |
61 | comp = None |
|
61 | comp = None | |
62 | if cgversion != b'01': |
|
62 | if cgversion != b'01': | |
63 | bundletype = b"HG20" |
|
63 | bundletype = b"HG20" | |
64 | if compress: |
|
64 | if compress: | |
65 | comp = b'BZ' |
|
65 | comp = b'BZ' | |
66 | elif compress: |
|
66 | elif compress: | |
67 | bundletype = b"HG10BZ" |
|
67 | bundletype = b"HG10BZ" | |
68 | else: |
|
68 | else: | |
69 | bundletype = b"HG10UN" |
|
69 | bundletype = b"HG10UN" | |
70 |
|
70 | |||
71 | outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads) |
|
71 | outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads) | |
72 | contentopts = { |
|
72 | contentopts = { | |
73 | b'cg.version': cgversion, |
|
73 | b'cg.version': cgversion, | |
74 | b'obsolescence': obsolescence, |
|
74 | b'obsolescence': obsolescence, | |
75 | b'phases': True, |
|
75 | b'phases': True, | |
76 | } |
|
76 | } | |
77 | return bundle2.writenewbundle( |
|
77 | return bundle2.writenewbundle( | |
78 | repo.ui, |
|
78 | repo.ui, | |
79 | repo, |
|
79 | repo, | |
80 | b'strip', |
|
80 | b'strip', | |
81 | name, |
|
81 | name, | |
82 | bundletype, |
|
82 | bundletype, | |
83 | outgoing, |
|
83 | outgoing, | |
84 | contentopts, |
|
84 | contentopts, | |
85 | vfs, |
|
85 | vfs, | |
86 | compression=comp, |
|
86 | compression=comp, | |
87 | ) |
|
87 | ) | |
88 |
|
88 | |||
89 |
|
89 | |||
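For illustration (invented values): stripping a changeset whose short node is 1234567890ab with the default topic produces a name of the form strip-backup/1234567890ab-xxxxxxxx-backup.hg, where xxxxxxxx is the hex form of the first four bytes of the SHA-1 computed above over all affected changeset hashes, which keeps backups of different strips from colliding.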
90 | def _collectfiles(repo, striprev): |
|
90 | def _collectfiles(repo, striprev): | |
91 | """find out the filelogs affected by the strip""" |
|
91 | """find out the filelogs affected by the strip""" | |
92 | files = set() |
|
92 | files = set() | |
93 |
|
93 | |||
94 | for x in pycompat.xrange(striprev, len(repo)): |
|
94 | for x in pycompat.xrange(striprev, len(repo)): | |
95 | files.update(repo[x].files()) |
|
95 | files.update(repo[x].files()) | |
96 |
|
96 | |||
97 | return sorted(files) |
|
97 | return sorted(files) | |
98 |
|
98 | |||
99 |
|
99 | |||
100 | def _collectrevlog(revlog, striprev): |
|
100 | def _collectrevlog(revlog, striprev): | |
101 | _, brokenset = revlog.getstrippoint(striprev) |
|
101 | _, brokenset = revlog.getstrippoint(striprev) | |
102 | return [revlog.linkrev(r) for r in brokenset] |
|
102 | return [revlog.linkrev(r) for r in brokenset] | |
103 |
|
103 | |||
104 |
|
104 | |||
105 | def _collectbrokencsets(repo, files, striprev): |
|
105 | def _collectbrokencsets(repo, files, striprev): | |
106 | """return the changesets which will be broken by the truncation""" |
|
106 | """return the changesets which will be broken by the truncation""" | |
107 | s = set() |
|
107 | s = set() | |
108 |
|
108 | |||
109 | for revlog in manifestrevlogs(repo): |
|
109 | for revlog in manifestrevlogs(repo): | |
110 | s.update(_collectrevlog(revlog, striprev)) |
|
110 | s.update(_collectrevlog(revlog, striprev)) | |
111 | for fname in files: |
|
111 | for fname in files: | |
112 | s.update(_collectrevlog(repo.file(fname), striprev)) |
|
112 | s.update(_collectrevlog(repo.file(fname), striprev)) | |
113 |
|
113 | |||
114 | return s |
|
114 | return s | |
115 |
|
115 | |||
116 |
|
116 | |||
117 | def strip(ui, repo, nodelist, backup=True, topic=b'backup'): |
|
117 | def strip(ui, repo, nodelist, backup=True, topic=b'backup'): | |
118 | # This function requires the caller to lock the repo, but it operates |
|
118 | # This function requires the caller to lock the repo, but it operates | |
119 | # within a transaction of its own, and thus requires there to be no current |
|
119 | # within a transaction of its own, and thus requires there to be no current | |
120 | # transaction when it is called. |
|
120 | # transaction when it is called. | |
121 | if repo.currenttransaction() is not None: |
|
121 | if repo.currenttransaction() is not None: | |
122 | raise error.ProgrammingError(b'cannot strip from inside a transaction') |
|
122 | raise error.ProgrammingError(b'cannot strip from inside a transaction') | |
123 |
|
123 | |||
124 | # Simple way to maintain backwards compatibility for this |
|
124 | # Simple way to maintain backwards compatibility for this | |
125 | # argument. |
|
125 | # argument. | |
126 | if backup in [b'none', b'strip']: |
|
126 | if backup in [b'none', b'strip']: | |
127 | backup = False |
|
127 | backup = False | |
128 |
|
128 | |||
129 | repo = repo.unfiltered() |
|
129 | repo = repo.unfiltered() | |
130 | repo.destroying() |
|
130 | repo.destroying() | |
131 | vfs = repo.vfs |
|
131 | vfs = repo.vfs | |
132 | # load bookmarks before the changelog to avoid side effects from an outdated |

132 | # load bookmarks before the changelog to avoid side effects from an outdated | |
133 | # changelog (see repo._refreshchangelog) |
|
133 | # changelog (see repo._refreshchangelog) | |
134 | repo._bookmarks |
|
134 | repo._bookmarks | |
135 | cl = repo.changelog |
|
135 | cl = repo.changelog | |
136 |
|
136 | |||
137 | # TODO handle undo of merge sets |
|
137 | # TODO handle undo of merge sets | |
138 | if isinstance(nodelist, bytes): |
|
138 | if isinstance(nodelist, bytes): | |
139 | nodelist = [nodelist] |
|
139 | nodelist = [nodelist] | |
140 | striplist = [cl.rev(node) for node in nodelist] |
|
140 | striplist = [cl.rev(node) for node in nodelist] | |
141 | striprev = min(striplist) |
|
141 | striprev = min(striplist) | |
142 |
|
142 | |||
143 | files = _collectfiles(repo, striprev) |
|
143 | files = _collectfiles(repo, striprev) | |
144 | saverevs = _collectbrokencsets(repo, files, striprev) |
|
144 | saverevs = _collectbrokencsets(repo, files, striprev) | |
145 |
|
145 | |||
146 | # Some revisions with rev > striprev may not be descendants of striprev. |
|
146 | # Some revisions with rev > striprev may not be descendants of striprev. | |
147 | # We have to find these revisions and put them in a bundle, so that |
|
147 | # We have to find these revisions and put them in a bundle, so that | |
148 | # we can restore them after the truncations. |
|
148 | # we can restore them after the truncations. | |
149 | # To create the bundle we use repo.changegroupsubset which requires |
|
149 | # To create the bundle we use repo.changegroupsubset which requires | |
150 | # the list of heads and bases of the set of interesting revisions. |
|
150 | # the list of heads and bases of the set of interesting revisions. | |
151 | # (head = revision in the set that has no descendant in the set; |
|
151 | # (head = revision in the set that has no descendant in the set; | |
152 | # base = revision in the set that has no ancestor in the set) |
|
152 | # base = revision in the set that has no ancestor in the set) | |
153 | tostrip = set(striplist) |
|
153 | tostrip = set(striplist) | |
154 | saveheads = set(saverevs) |
|
154 | saveheads = set(saverevs) | |
155 | for r in cl.revs(start=striprev + 1): |
|
155 | for r in cl.revs(start=striprev + 1): | |
156 | if any(p in tostrip for p in cl.parentrevs(r)): |
|
156 | if any(p in tostrip for p in cl.parentrevs(r)): | |
157 | tostrip.add(r) |
|
157 | tostrip.add(r) | |
158 |
|
158 | |||
159 | if r not in tostrip: |
|
159 | if r not in tostrip: | |
160 | saverevs.add(r) |
|
160 | saverevs.add(r) | |
161 | saveheads.difference_update(cl.parentrevs(r)) |
|
161 | saveheads.difference_update(cl.parentrevs(r)) | |
162 | saveheads.add(r) |
|
162 | saveheads.add(r) | |
163 | saveheads = [cl.node(r) for r in saveheads] |
|
163 | saveheads = [cl.node(r) for r in saveheads] | |
164 |
|
164 | |||
165 | # compute base nodes |
|
165 | # compute base nodes | |
166 | if saverevs: |
|
166 | if saverevs: | |
167 | descendants = set(cl.descendants(saverevs)) |
|
167 | descendants = set(cl.descendants(saverevs)) | |
168 | saverevs.difference_update(descendants) |
|
168 | saverevs.difference_update(descendants) | |
169 | savebases = [cl.node(r) for r in saverevs] |
|
169 | savebases = [cl.node(r) for r in saverevs] | |
170 | stripbases = [cl.node(r) for r in tostrip] |
|
170 | stripbases = [cl.node(r) for r in tostrip] | |
171 |
|
171 | |||
172 | stripobsidx = obsmarkers = () |
|
172 | stripobsidx = obsmarkers = () | |
173 | if repo.ui.configbool(b'devel', b'strip-obsmarkers'): |
|
173 | if repo.ui.configbool(b'devel', b'strip-obsmarkers'): | |
174 | obsmarkers = obsutil.exclusivemarkers(repo, stripbases) |
|
174 | obsmarkers = obsutil.exclusivemarkers(repo, stripbases) | |
175 | if obsmarkers: |
|
175 | if obsmarkers: | |
176 | stripobsidx = [ |
|
176 | stripobsidx = [ | |
177 | i for i, m in enumerate(repo.obsstore) if m in obsmarkers |
|
177 | i for i, m in enumerate(repo.obsstore) if m in obsmarkers | |
178 | ] |
|
178 | ] | |
179 |
|
179 | |||
180 | newbmtarget, updatebm = _bookmarkmovements(repo, tostrip) |
|
180 | newbmtarget, updatebm = _bookmarkmovements(repo, tostrip) | |
181 |
|
181 | |||
182 | backupfile = None |
|
182 | backupfile = None | |
183 | node = nodelist[-1] |
|
183 | node = nodelist[-1] | |
184 | if backup: |
|
184 | if backup: | |
185 | backupfile = _createstripbackup(repo, stripbases, node, topic) |
|
185 | backupfile = _createstripbackup(repo, stripbases, node, topic) | |
186 | # create a changegroup for all the branches we need to keep |
|
186 | # create a changegroup for all the branches we need to keep | |
187 | tmpbundlefile = None |
|
187 | tmpbundlefile = None | |
188 | if saveheads: |
|
188 | if saveheads: | |
189 | # do not compress temporary bundle if we remove it from disk later |
|
189 | # do not compress temporary bundle if we remove it from disk later | |
190 | # |
|
190 | # | |
191 | # We do not include obsolescence, it might re-introduce prune markers |
|
191 | # We do not include obsolescence, it might re-introduce prune markers | |
192 | # we are trying to strip. This is harmless since the stripped markers |
|
192 | # we are trying to strip. This is harmless since the stripped markers | |
193 | # are already backed up and we did not touch the markers for the |

193 | # are already backed up and we did not touch the markers for the | |
194 | # saved changesets. |
|
194 | # saved changesets. | |
195 | tmpbundlefile = backupbundle( |
|
195 | tmpbundlefile = backupbundle( | |
196 | repo, |
|
196 | repo, | |
197 | savebases, |
|
197 | savebases, | |
198 | saveheads, |
|
198 | saveheads, | |
199 | node, |
|
199 | node, | |
200 | b'temp', |
|
200 | b'temp', | |
201 | compress=False, |
|
201 | compress=False, | |
202 | obsolescence=False, |
|
202 | obsolescence=False, | |
203 | ) |
|
203 | ) | |
204 |
|
204 | |||
205 | with ui.uninterruptible(): |
|
205 | with ui.uninterruptible(): | |
206 | try: |
|
206 | try: | |
207 | with repo.transaction(b"strip") as tr: |
|
207 | with repo.transaction(b"strip") as tr: | |
208 | # TODO this code violates the interface abstraction of the |
|
208 | # TODO this code violates the interface abstraction of the | |
209 | # transaction and makes assumptions that file storage is |
|
209 | # transaction and makes assumptions that file storage is | |
210 | # using append-only files. We'll need some kind of storage |
|
210 | # using append-only files. We'll need some kind of storage | |
211 | # API to handle stripping for us. |
|
211 | # API to handle stripping for us. | |
212 | oldfiles = set(tr._offsetmap.keys()) |
|
212 | oldfiles = set(tr._offsetmap.keys()) | |
213 | oldfiles.update(tr._newfiles) |
|
213 | oldfiles.update(tr._newfiles) | |
214 |
|
214 | |||
215 | tr.startgroup() |
|
215 | tr.startgroup() | |
216 | cl.strip(striprev, tr) |
|
216 | cl.strip(striprev, tr) | |
217 | stripmanifest(repo, striprev, tr, files) |
|
217 | stripmanifest(repo, striprev, tr, files) | |
218 |
|
218 | |||
219 | for fn in files: |
|
219 | for fn in files: | |
220 | repo.file(fn).strip(striprev, tr) |
|
220 | repo.file(fn).strip(striprev, tr) | |
221 | tr.endgroup() |
|
221 | tr.endgroup() | |
222 |
|
222 | |||
223 | entries = tr.readjournal() |
|
223 | entries = tr.readjournal() | |
224 |
|
224 | |||
225 | for file, troffset in entries: |
|
225 | for file, troffset in entries: | |
226 | if file in oldfiles: |
|
226 | if file in oldfiles: | |
227 | continue |
|
227 | continue | |
228 | with repo.svfs(file, b'a', checkambig=True) as fp: |
|
228 | with repo.svfs(file, b'a', checkambig=True) as fp: | |
229 | fp.truncate(troffset) |
|
229 | fp.truncate(troffset) | |
230 | if troffset == 0: |
|
230 | if troffset == 0: | |
231 | repo.store.markremoved(file) |
|
231 | repo.store.markremoved(file) | |
232 |
|
232 | |||
233 | deleteobsmarkers(repo.obsstore, stripobsidx) |
|
233 | deleteobsmarkers(repo.obsstore, stripobsidx) | |
234 | del repo.obsstore |
|
234 | del repo.obsstore | |
235 | repo.invalidatevolatilesets() |
|
235 | repo.invalidatevolatilesets() | |
236 | repo._phasecache.filterunknown(repo) |
|
236 | repo._phasecache.filterunknown(repo) | |
237 |
|
237 | |||
238 | if tmpbundlefile: |
|
238 | if tmpbundlefile: | |
239 | ui.note(_(b"adding branch\n")) |
|
239 | ui.note(_(b"adding branch\n")) | |
240 | f = vfs.open(tmpbundlefile, b"rb") |
|
240 | f = vfs.open(tmpbundlefile, b"rb") | |
241 | gen = exchange.readbundle(ui, f, tmpbundlefile, vfs) |
|
241 | gen = exchange.readbundle(ui, f, tmpbundlefile, vfs) | |
242 | if not repo.ui.verbose: |
|
242 | if not repo.ui.verbose: | |
243 | # silence internal shuffling chatter |
|
243 | # silence internal shuffling chatter | |
244 | repo.ui.pushbuffer() |
|
244 | repo.ui.pushbuffer() | |
245 | tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile) |
|
245 | tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile) | |
246 | txnname = b'strip' |
|
246 | txnname = b'strip' | |
247 | if not isinstance(gen, bundle2.unbundle20): |
|
247 | if not isinstance(gen, bundle2.unbundle20): | |
248 | txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl) |
|
248 | txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl) | |
249 | with repo.transaction(txnname) as tr: |
|
249 | with repo.transaction(txnname) as tr: | |
250 | bundle2.applybundle( |
|
250 | bundle2.applybundle( | |
251 | repo, gen, tr, source=b'strip', url=tmpbundleurl |
|
251 | repo, gen, tr, source=b'strip', url=tmpbundleurl | |
252 | ) |
|
252 | ) | |
253 | if not repo.ui.verbose: |
|
253 | if not repo.ui.verbose: | |
254 | repo.ui.popbuffer() |
|
254 | repo.ui.popbuffer() | |
255 | f.close() |
|
255 | f.close() | |
256 |
|
256 | |||
257 | with repo.transaction(b'repair') as tr: |
|
257 | with repo.transaction(b'repair') as tr: | |
258 | bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] |
|
258 | bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] | |
259 | repo._bookmarks.applychanges(repo, tr, bmchanges) |
|
259 | repo._bookmarks.applychanges(repo, tr, bmchanges) | |
260 |
|
260 | |||
261 | # remove undo files |
|
261 | # remove undo files | |
262 | for undovfs, undofile in repo.undofiles(): |
|
262 | for undovfs, undofile in repo.undofiles(): | |
263 | try: |
|
263 | try: | |
264 | undovfs.unlink(undofile) |
|
264 | undovfs.unlink(undofile) | |
265 | except OSError as e: |
|
265 | except OSError as e: | |
266 | if e.errno != errno.ENOENT: |
|
266 | if e.errno != errno.ENOENT: | |
267 | ui.warn( |
|
267 | ui.warn( | |
268 | _(b'error removing %s: %s\n') |
|
268 | _(b'error removing %s: %s\n') | |
269 | % ( |
|
269 | % ( | |
270 | undovfs.join(undofile), |
|
270 | undovfs.join(undofile), | |
271 | stringutil.forcebytestr(e), |
|
271 | stringutil.forcebytestr(e), | |
272 | ) |
|
272 | ) | |
273 | ) |
|
273 | ) | |
274 |
|
274 | |||
275 | except: # re-raises |
|
275 | except: # re-raises | |
276 | if backupfile: |
|
276 | if backupfile: | |
277 | ui.warn( |
|
277 | ui.warn( | |
278 | _(b"strip failed, backup bundle stored in '%s'\n") |
|
278 | _(b"strip failed, backup bundle stored in '%s'\n") | |
279 | % vfs.join(backupfile) |
|
279 | % vfs.join(backupfile) | |
280 | ) |
|
280 | ) | |
281 | if tmpbundlefile: |
|
281 | if tmpbundlefile: | |
282 | ui.warn( |
|
282 | ui.warn( | |
283 | _(b"strip failed, unrecovered changes stored in '%s'\n") |
|
283 | _(b"strip failed, unrecovered changes stored in '%s'\n") | |
284 | % vfs.join(tmpbundlefile) |
|
284 | % vfs.join(tmpbundlefile) | |
285 | ) |
|
285 | ) | |
286 | ui.warn( |
|
286 | ui.warn( | |
287 | _( |
|
287 | _( | |
288 | b"(fix the problem, then recover the changesets with " |
|
288 | b"(fix the problem, then recover the changesets with " | |
289 | b"\"hg unbundle '%s'\")\n" |
|
289 | b"\"hg unbundle '%s'\")\n" | |
290 | ) |
|
290 | ) | |
291 | % vfs.join(tmpbundlefile) |
|
291 | % vfs.join(tmpbundlefile) | |
292 | ) |
|
292 | ) | |
293 | raise |
|
293 | raise | |
294 | else: |
|
294 | else: | |
295 | if tmpbundlefile: |
|
295 | if tmpbundlefile: | |
296 | # Remove temporary bundle only if there were no exceptions |
|
296 | # Remove temporary bundle only if there were no exceptions | |
297 | vfs.unlink(tmpbundlefile) |
|
297 | vfs.unlink(tmpbundlefile) | |
298 |
|
298 | |||
299 | repo.destroyed() |
|
299 | repo.destroyed() | |
300 | # return the backup file path (or None if 'backup' was False) so |
|
300 | # return the backup file path (or None if 'backup' was False) so | |
301 | # extensions can use it |
|
301 | # extensions can use it | |
302 | return backupfile |
|
302 | return backupfile | |
303 |
|
303 | |||
304 |
|
304 | |||
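A hedged usage sketch of strip() above (the ui, repo and node objects are assumed to exist already): per the comments at the top of the function, the caller must hold the repo lock and must not have an open transaction.

    def strip_one(ui, repo, node):
        # caller-side preconditions taken from the comments above
        with repo.lock():
            assert repo.currenttransaction() is None
            # returns the path of the backup bundle, or None if backup=False
            return strip(ui, repo, [node], backup=True, topic=b'backup')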
305 | def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'): |
|
305 | def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'): | |
306 | """perform a "soft" strip using the archived phase""" |
|
306 | """perform a "soft" strip using the archived phase""" | |
307 | tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)] |
|
307 | tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)] | |
308 | if not tostrip: |
|
308 | if not tostrip: | |
309 | return None |
|
309 | return None | |
310 |
|
310 | |||
311 | backupfile = None |
|
311 | backupfile = None | |
312 | if backup: |
|
312 | if backup: | |
313 | node = tostrip[0] |
|
313 | node = tostrip[0] | |
314 | backupfile = _createstripbackup(repo, tostrip, node, topic) |
|
314 | backupfile = _createstripbackup(repo, tostrip, node, topic) | |
315 |
|
315 | |||
316 | newbmtarget, updatebm = _bookmarkmovements(repo, tostrip) |
|
316 | newbmtarget, updatebm = _bookmarkmovements(repo, tostrip) | |
317 | with repo.transaction(b'strip') as tr: |
|
317 | with repo.transaction(b'strip') as tr: | |
318 | phases.retractboundary(repo, tr, phases.archived, tostrip) |
|
318 | phases.retractboundary(repo, tr, phases.archived, tostrip) | |
319 | bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] |
|
319 | bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] | |
320 | repo._bookmarks.applychanges(repo, tr, bmchanges) |
|
320 | repo._bookmarks.applychanges(repo, tr, bmchanges) | |
321 | return backupfile |
|
321 | return backupfile | |
322 |
|
322 | |||
323 |
|
323 | |||
324 | def _bookmarkmovements(repo, tostrip): |
|
324 | def _bookmarkmovements(repo, tostrip): | |
325 | # compute necessary bookmark movement |
|
325 | # compute necessary bookmark movement | |
326 | bm = repo._bookmarks |
|
326 | bm = repo._bookmarks | |
327 | updatebm = [] |
|
327 | updatebm = [] | |
328 | for m in bm: |
|
328 | for m in bm: | |
329 | rev = repo[bm[m]].rev() |
|
329 | rev = repo[bm[m]].rev() | |
330 | if rev in tostrip: |
|
330 | if rev in tostrip: | |
331 | updatebm.append(m) |
|
331 | updatebm.append(m) | |
332 | newbmtarget = None |
|
332 | newbmtarget = None | |
333 | # If we need to move bookmarks, compute bookmark |
|
333 | # If we need to move bookmarks, compute bookmark | |
334 | # targets. Otherwise we can skip doing this logic. |
|
334 | # targets. Otherwise we can skip doing this logic. | |
335 | if updatebm: |
|
335 | if updatebm: | |
336 | # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), |
|
336 | # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), | |
337 | # but is much faster |
|
337 | # but is much faster | |
338 | newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip) |
|
338 | newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip) | |
339 | if newbmtarget: |
|
339 | if newbmtarget: | |
340 | newbmtarget = repo[newbmtarget.first()].node() |
|
340 | newbmtarget = repo[newbmtarget.first()].node() | |
341 | else: |
|
341 | else: | |
342 | newbmtarget = b'.' |
|
342 | newbmtarget = b'.' | |
343 | return newbmtarget, updatebm |
|
343 | return newbmtarget, updatebm | |
344 |
|
344 | |||
345 |
|
345 | |||
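A quick check of the equivalence claimed in the comment above (a toy example, not from the source): for a linear history 0-1-2-3 with the stripped set s = {2, 3}, parents(s) - s = {1} and heads(::s - s) = heads({0, 1}) = {1}, so both revsets pick revision 1 as the new bookmark target; the parents() form simply avoids materializing every ancestor.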
346 | def _createstripbackup(repo, stripbases, node, topic): |
|
346 | def _createstripbackup(repo, stripbases, node, topic): | |
347 | # backup the changeset we are about to strip |
|
347 | # backup the changeset we are about to strip | |
348 | vfs = repo.vfs |
|
348 | vfs = repo.vfs | |
349 | cl = repo.changelog |
|
349 | cl = repo.changelog | |
350 | backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic) |
|
350 | backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic) | |
351 | repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile)) |
|
351 | repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile)) | |
352 | repo.ui.log( |
|
352 | repo.ui.log( | |
353 | b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile) |
|
353 | b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile) | |
354 | ) |
|
354 | ) | |
355 | return backupfile |
|
355 | return backupfile | |
356 |
|
356 | |||
357 |
|
357 | |||
358 | def safestriproots(ui, repo, nodes): |
|
358 | def safestriproots(ui, repo, nodes): | |
359 | """return list of roots of nodes where descendants are covered by nodes""" |
|
359 | """return list of roots of nodes where descendants are covered by nodes""" | |
360 | torev = repo.unfiltered().changelog.rev |
|
360 | torev = repo.unfiltered().changelog.rev | |
361 | revs = {torev(n) for n in nodes} |
|
361 | revs = {torev(n) for n in nodes} | |
362 | # tostrip = wanted - unsafe = wanted - ancestors(orphaned) |
|
362 | # tostrip = wanted - unsafe = wanted - ancestors(orphaned) | |
363 | # orphaned = affected - wanted |
|
363 | # orphaned = affected - wanted | |
364 | # affected = descendants(roots(wanted)) |
|
364 | # affected = descendants(roots(wanted)) | |
365 | # wanted = revs |
|
365 | # wanted = revs | |
366 | revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )' |
|
366 | revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )' | |
367 | tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs)) |
|
367 | tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs)) | |
368 | notstrip = revs - tostrip |
|
368 | notstrip = revs - tostrip | |
369 | if notstrip: |
|
369 | if notstrip: | |
370 | nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip)) |
|
370 | nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip)) | |
371 | ui.warn( |
|
371 | ui.warn( | |
372 | _(b'warning: orphaned descendants detected, not stripping %s\n') |
|
372 | _(b'warning: orphaned descendants detected, not stripping %s\n') | |
373 | % nodestr |
|
373 | % nodestr | |
374 | ) |
|
374 | ) | |
375 | return [c.node() for c in repo.set(b'roots(%ld)', tostrip)] |
|
375 | return [c.node() for c in repo.set(b'roots(%ld)', tostrip)] | |
376 |
|
376 | |||
377 |
|
377 | |||
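A small worked example of the warning path above (toy revision numbers): if nodes covers only revision 5 but 5 has a child 6 that is not listed, then 6 is orphaned, 5 is an ancestor of that orphan and is removed from tostrip, and the warning reports 5 as not stripped; passing both 5 and 6 lets the whole branch be stripped, and roots(%ld) collapses it to the single root 5.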
378 | class stripcallback(object): |
|
378 | class stripcallback(object): | |
379 | """used as a transaction postclose callback""" |
|
379 | """used as a transaction postclose callback""" | |
380 |
|
380 | |||
381 | def __init__(self, ui, repo, backup, topic): |
|
381 | def __init__(self, ui, repo, backup, topic): | |
382 | self.ui = ui |
|
382 | self.ui = ui | |
383 | self.repo = repo |
|
383 | self.repo = repo | |
384 | self.backup = backup |
|
384 | self.backup = backup | |
385 | self.topic = topic or b'backup' |
|
385 | self.topic = topic or b'backup' | |
386 | self.nodelist = [] |
|
386 | self.nodelist = [] | |
387 |
|
387 | |||
388 | def addnodes(self, nodes): |
|
388 | def addnodes(self, nodes): | |
389 | self.nodelist.extend(nodes) |
|
389 | self.nodelist.extend(nodes) | |
390 |
|
390 | |||
391 | def __call__(self, tr): |
|
391 | def __call__(self, tr): | |
392 | roots = safestriproots(self.ui, self.repo, self.nodelist) |
|
392 | roots = safestriproots(self.ui, self.repo, self.nodelist) | |
393 | if roots: |
|
393 | if roots: | |
394 | strip(self.ui, self.repo, roots, self.backup, self.topic) |
|
394 | strip(self.ui, self.repo, roots, self.backup, self.topic) | |
395 |
|
395 | |||
396 |
|
396 | |||
397 | def delayedstrip(ui, repo, nodelist, topic=None, backup=True): |
|
397 | def delayedstrip(ui, repo, nodelist, topic=None, backup=True): | |
398 | """like strip, but works inside transaction and won't strip irreverent revs |
|
398 | """like strip, but works inside transaction and won't strip irreverent revs | |
399 |
|
399 | |||
400 | nodelist must explicitly contain all descendants. Otherwise a warning will |
|
400 | nodelist must explicitly contain all descendants. Otherwise a warning will | |
401 | be printed that some nodes are not stripped. |
|
401 | be printed that some nodes are not stripped. | |
402 |
|
402 | |||
403 | Will do a backup if `backup` is True. The last non-None "topic" will be |
|
403 | Will do a backup if `backup` is True. The last non-None "topic" will be | |
404 | used as the backup topic name. The default backup topic name is "backup". |
|
404 | used as the backup topic name. The default backup topic name is "backup". | |
405 | """ |
|
405 | """ | |
406 | tr = repo.currenttransaction() |
|
406 | tr = repo.currenttransaction() | |
407 | if not tr: |
|
407 | if not tr: | |
408 | nodes = safestriproots(ui, repo, nodelist) |
|
408 | nodes = safestriproots(ui, repo, nodelist) | |
409 | return strip(ui, repo, nodes, backup=backup, topic=topic) |
|
409 | return strip(ui, repo, nodes, backup=backup, topic=topic) | |
410 | # transaction postclose callbacks are called in alphabetical order. |

410 | # transaction postclose callbacks are called in alphabetical order. | |
411 | # use '\xff' as prefix so we are likely to be called last. |
|
411 | # use '\xff' as prefix so we are likely to be called last. | |
412 | callback = tr.getpostclose(b'\xffstrip') |
|
412 | callback = tr.getpostclose(b'\xffstrip') | |
413 | if callback is None: |
|
413 | if callback is None: | |
414 | callback = stripcallback(ui, repo, backup=backup, topic=topic) |
|
414 | callback = stripcallback(ui, repo, backup=backup, topic=topic) | |
415 | tr.addpostclose(b'\xffstrip', callback) |
|
415 | tr.addpostclose(b'\xffstrip', callback) | |
416 | if topic: |
|
416 | if topic: | |
417 | callback.topic = topic |
|
417 | callback.topic = topic | |
418 | callback.addnodes(nodelist) |
|
418 | callback.addnodes(nodelist) | |
419 |
|
419 | |||
420 |
|
420 | |||
421 | def stripmanifest(repo, striprev, tr, files): |
|
421 | def stripmanifest(repo, striprev, tr, files): | |
422 | for revlog in manifestrevlogs(repo): |
|
422 | for revlog in manifestrevlogs(repo): | |
423 | revlog.strip(striprev, tr) |
|
423 | revlog.strip(striprev, tr) | |
424 |
|
424 | |||
425 |
|
425 | |||
426 | def manifestrevlogs(repo): |
|
426 | def manifestrevlogs(repo): | |
427 | yield repo.manifestlog.getstorage(b'') |
|
427 | yield repo.manifestlog.getstorage(b'') | |
428 | if scmutil.istreemanifest(repo): |
|
428 | if scmutil.istreemanifest(repo): | |
429 | # This logic is safe if treemanifest isn't enabled, but also |
|
429 | # This logic is safe if treemanifest isn't enabled, but also | |
430 | # pointless, so we skip it if treemanifest isn't enabled. |
|
430 | # pointless, so we skip it if treemanifest isn't enabled. | |
431 | for unencoded, encoded, size in repo.store.datafiles(): |
|
431 | for t, unencoded, encoded, size in repo.store.datafiles(): | |
432 | if unencoded.startswith(b'meta/') and unencoded.endswith( |
|
432 | if unencoded.startswith(b'meta/') and unencoded.endswith( | |
433 | b'00manifest.i' |
|
433 | b'00manifest.i' | |
434 | ): |
|
434 | ): | |
435 | dir = unencoded[5:-12] |
|
435 | dir = unencoded[5:-12] | |
436 | yield repo.manifestlog.getstorage(dir) |
|
436 | yield repo.manifestlog.getstorage(dir) | |
437 |
|
437 | |||
438 |
|
438 | |||
439 | def rebuildfncache(ui, repo): |
|
439 | def rebuildfncache(ui, repo): | |
440 | """Rebuilds the fncache file from repo history. |
|
440 | """Rebuilds the fncache file from repo history. | |
441 |
|
441 | |||
442 | Missing entries will be added. Extra entries will be removed. |
|
442 | Missing entries will be added. Extra entries will be removed. | |
443 | """ |
|
443 | """ | |
444 | repo = repo.unfiltered() |
|
444 | repo = repo.unfiltered() | |
445 |
|
445 | |||
446 | if requirements.FNCACHE_REQUIREMENT not in repo.requirements: |
|
446 | if requirements.FNCACHE_REQUIREMENT not in repo.requirements: | |
447 | ui.warn( |
|
447 | ui.warn( | |
448 | _( |
|
448 | _( | |
449 | b'(not rebuilding fncache because repository does not ' |
|
449 | b'(not rebuilding fncache because repository does not ' | |
450 | b'support fncache)\n' |
|
450 | b'support fncache)\n' | |
451 | ) |
|
451 | ) | |
452 | ) |
|
452 | ) | |
453 | return |
|
453 | return | |
454 |
|
454 | |||
455 | with repo.lock(): |
|
455 | with repo.lock(): | |
456 | fnc = repo.store.fncache |
|
456 | fnc = repo.store.fncache | |
457 | fnc.ensureloaded(warn=ui.warn) |
|
457 | fnc.ensureloaded(warn=ui.warn) | |
458 |
|
458 | |||
459 | oldentries = set(fnc.entries) |
|
459 | oldentries = set(fnc.entries) | |
460 | newentries = set() |
|
460 | newentries = set() | |
461 | seenfiles = set() |
|
461 | seenfiles = set() | |
462 |
|
462 | |||
463 | progress = ui.makeprogress( |
|
463 | progress = ui.makeprogress( | |
464 | _(b'rebuilding'), unit=_(b'changesets'), total=len(repo) |
|
464 | _(b'rebuilding'), unit=_(b'changesets'), total=len(repo) | |
465 | ) |
|
465 | ) | |
466 | for rev in repo: |
|
466 | for rev in repo: | |
467 | progress.update(rev) |
|
467 | progress.update(rev) | |
468 |
|
468 | |||
469 | ctx = repo[rev] |
|
469 | ctx = repo[rev] | |
470 | for f in ctx.files(): |
|
470 | for f in ctx.files(): | |
471 | # This is to minimize I/O. |
|
471 | # This is to minimize I/O. | |
472 | if f in seenfiles: |
|
472 | if f in seenfiles: | |
473 | continue |
|
473 | continue | |
474 | seenfiles.add(f) |
|
474 | seenfiles.add(f) | |
475 |
|
475 | |||
476 | i = b'data/%s.i' % f |
|
476 | i = b'data/%s.i' % f | |
477 | d = b'data/%s.d' % f |
|
477 | d = b'data/%s.d' % f | |
478 |
|
478 | |||
479 | if repo.store._exists(i): |
|
479 | if repo.store._exists(i): | |
480 | newentries.add(i) |
|
480 | newentries.add(i) | |
481 | if repo.store._exists(d): |
|
481 | if repo.store._exists(d): | |
482 | newentries.add(d) |
|
482 | newentries.add(d) | |
483 |
|
483 | |||
484 | progress.complete() |
|
484 | progress.complete() | |
485 |
|
485 | |||
486 | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: |
|
486 | if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: | |
487 | # This logic is safe if treemanifest isn't enabled, but also |
|
487 | # This logic is safe if treemanifest isn't enabled, but also | |
488 | # pointless, so we skip it if treemanifest isn't enabled. |
|
488 | # pointless, so we skip it if treemanifest isn't enabled. | |
489 | for dir in pathutil.dirs(seenfiles): |
|
489 | for dir in pathutil.dirs(seenfiles): | |
490 | i = b'meta/%s/00manifest.i' % dir |
|
490 | i = b'meta/%s/00manifest.i' % dir | |
491 | d = b'meta/%s/00manifest.d' % dir |
|
491 | d = b'meta/%s/00manifest.d' % dir | |
492 |
|
492 | |||
493 | if repo.store._exists(i): |
|
493 | if repo.store._exists(i): | |
494 | newentries.add(i) |
|
494 | newentries.add(i) | |
495 | if repo.store._exists(d): |
|
495 | if repo.store._exists(d): | |
496 | newentries.add(d) |
|
496 | newentries.add(d) | |
497 |
|
497 | |||
498 | addcount = len(newentries - oldentries) |
|
498 | addcount = len(newentries - oldentries) | |
499 | removecount = len(oldentries - newentries) |
|
499 | removecount = len(oldentries - newentries) | |
500 | for p in sorted(oldentries - newentries): |
|
500 | for p in sorted(oldentries - newentries): | |
501 | ui.write(_(b'removing %s\n') % p) |
|
501 | ui.write(_(b'removing %s\n') % p) | |
502 | for p in sorted(newentries - oldentries): |
|
502 | for p in sorted(newentries - oldentries): | |
503 | ui.write(_(b'adding %s\n') % p) |
|
503 | ui.write(_(b'adding %s\n') % p) | |
504 |
|
504 | |||
505 | if addcount or removecount: |
|
505 | if addcount or removecount: | |
506 | ui.write( |
|
506 | ui.write( | |
507 | _(b'%d items added, %d removed from fncache\n') |
|
507 | _(b'%d items added, %d removed from fncache\n') | |
508 | % (addcount, removecount) |
|
508 | % (addcount, removecount) | |
509 | ) |
|
509 | ) | |
510 | fnc.entries = newentries |
|
510 | fnc.entries = newentries | |
511 | fnc._dirty = True |
|
511 | fnc._dirty = True | |
512 |
|
512 | |||
513 | with repo.transaction(b'fncache') as tr: |
|
513 | with repo.transaction(b'fncache') as tr: | |
514 | fnc.write(tr) |
|
514 | fnc.write(tr) | |
515 | else: |
|
515 | else: | |
516 | ui.write(_(b'fncache already up to date\n')) |
|
516 | ui.write(_(b'fncache already up to date\n')) | |
517 |
|
517 | |||
518 |
|
518 | |||
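For reference, rebuildfncache() above is the code path behind `hg debugrebuildfncache`. A minimal sketch of driving it programmatically, assuming Mercurial is installed and the current directory is a local repository:

    from mercurial import hg, repair, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'.')   # assumes the cwd is a Mercurial repository
    repair.rebuildfncache(ui, repo)  # adds missing entries, drops stale ones
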
519 | def deleteobsmarkers(obsstore, indices): |
|
519 | def deleteobsmarkers(obsstore, indices): | |
520 | """Delete some obsmarkers from obsstore and return how many were deleted |
|
520 | """Delete some obsmarkers from obsstore and return how many were deleted | |
521 |
|
521 | |||
522 | 'indices' is a list of ints which are the indices |
|
522 | 'indices' is a list of ints which are the indices | |
523 | of the markers to be deleted. |
|
523 | of the markers to be deleted. | |
524 |
|
524 | |||
525 | Every invocation of this function completely rewrites the obsstore file, |
|
525 | Every invocation of this function completely rewrites the obsstore file, | |
526 | skipping the markers we want to be removed. The new temporary file is |
|
526 | skipping the markers we want to be removed. The new temporary file is | |
527 | created, remaining markers are written there and on .close() this file |
|
527 | created, remaining markers are written there and on .close() this file | |
528 | gets atomically renamed to obsstore, thus guaranteeing consistency.""" |
|
528 | gets atomically renamed to obsstore, thus guaranteeing consistency.""" | |
529 | if not indices: |
|
529 | if not indices: | |
530 | # we don't want to rewrite the obsstore with the same content |
|
530 | # we don't want to rewrite the obsstore with the same content | |
531 | return |
|
531 | return | |
532 |
|
532 | |||
533 | left = [] |
|
533 | left = [] | |
534 | current = obsstore._all |
|
534 | current = obsstore._all | |
535 | n = 0 |
|
535 | n = 0 | |
536 | for i, m in enumerate(current): |
|
536 | for i, m in enumerate(current): | |
537 | if i in indices: |
|
537 | if i in indices: | |
538 | n += 1 |
|
538 | n += 1 | |
539 | continue |
|
539 | continue | |
540 | left.append(m) |
|
540 | left.append(m) | |
541 |
|
541 | |||
542 | newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True) |
|
542 | newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True) | |
543 | for bytes in obsolete.encodemarkers(left, True, obsstore._version): |
|
543 | for bytes in obsolete.encodemarkers(left, True, obsstore._version): | |
544 | newobsstorefile.write(bytes) |
|
544 | newobsstorefile.write(bytes) | |
545 | newobsstorefile.close() |
|
545 | newobsstorefile.close() | |
546 | return n |
|
546 | return n |
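The rewrite strategy deleteobsmarkers() documents above — write the surviving records to a temporary file, then atomically swap it into place — is a general pattern. A plain-Python sketch of the same idea using line-oriented records and only standard-library calls; the helper name is hypothetical and this is not the Mercurial vfs or obsmarker encoding:

    import os
    import tempfile


    def rewrite_without(path, drop_indices):
        """Rewrite a line-oriented file, skipping the records at drop_indices."""
        drop = set(drop_indices)
        with open(path, 'rb') as fp:
            records = fp.readlines()
        kept = [rec for i, rec in enumerate(records) if i not in drop]
        # create the temporary file next to the target so the rename stays on
        # the same filesystem
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
        with os.fdopen(fd, 'wb') as out:
            out.writelines(kept)
        os.replace(tmp, path)  # atomic replacement, like atomictemp=True above
        return len(records) - len(kept)  # how many records were dropped
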
@@ -1,756 +1,799 | |||||
1 | # store.py - repository store handling for Mercurial |
|
1 | # store.py - repository store handling for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2008 Olivia Mackall <olivia@selenic.com> |
|
3 | # Copyright 2008 Olivia Mackall <olivia@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import errno |
|
10 | import errno | |
11 | import functools |
|
11 | import functools | |
12 | import os |
|
12 | import os | |
13 | import stat |
|
13 | import stat | |
14 |
|
14 | |||
15 | from .i18n import _ |
|
15 | from .i18n import _ | |
16 | from .pycompat import getattr |
|
16 | from .pycompat import getattr | |
17 | from .node import hex |
|
17 | from .node import hex | |
18 | from . import ( |
|
18 | from . import ( | |
19 | changelog, |
|
19 | changelog, | |
20 | error, |
|
20 | error, | |
21 | manifest, |
|
21 | manifest, | |
22 | policy, |
|
22 | policy, | |
23 | pycompat, |
|
23 | pycompat, | |
24 | util, |
|
24 | util, | |
25 | vfs as vfsmod, |
|
25 | vfs as vfsmod, | |
26 | ) |
|
26 | ) | |
27 | from .utils import hashutil |
|
27 | from .utils import hashutil | |
28 |
|
28 | |||
29 | parsers = policy.importmod('parsers') |
|
29 | parsers = policy.importmod('parsers') | |
30 | # how many bytes should be read from fncache in one read |
|
30 | # how many bytes should be read from fncache in one read | |
31 | # It is done to prevent loading large fncache files into memory |
|
31 | # It is done to prevent loading large fncache files into memory | |
32 | fncache_chunksize = 10 ** 6 |
|
32 | fncache_chunksize = 10 ** 6 | |
33 |
|
33 | |||
34 |
|
34 | |||
35 | def _matchtrackedpath(path, matcher): |
|
35 | def _matchtrackedpath(path, matcher): | |
36 | """parses a fncache entry and returns whether the entry is tracking a path |
|
36 | """parses a fncache entry and returns whether the entry is tracking a path | |
37 | matched by matcher or not. |
|
37 | matched by matcher or not. | |
38 |
|
38 | |||
39 | If matcher is None, returns True""" |
|
39 | If matcher is None, returns True""" | |
40 |
|
40 | |||
41 | if matcher is None: |
|
41 | if matcher is None: | |
42 | return True |
|
42 | return True | |
43 | path = decodedir(path) |
|
43 | path = decodedir(path) | |
44 | if path.startswith(b'data/'): |
|
44 | if path.startswith(b'data/'): | |
45 | return matcher(path[len(b'data/') : -len(b'.i')]) |
|
45 | return matcher(path[len(b'data/') : -len(b'.i')]) | |
46 | elif path.startswith(b'meta/'): |
|
46 | elif path.startswith(b'meta/'): | |
47 | return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')]) |
|
47 | return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')]) | |
48 |
|
48 | |||
49 | raise error.ProgrammingError(b"cannot decode path %s" % path) |
|
49 | raise error.ProgrammingError(b"cannot decode path %s" % path) | |
50 |
|
50 | |||
51 |
|
51 | |||
52 | # This avoids a collision between a file named foo and a dir named |
|
52 | # This avoids a collision between a file named foo and a dir named | |
53 | # foo.i or foo.d |
|
53 | # foo.i or foo.d | |
54 | def _encodedir(path): |
|
54 | def _encodedir(path): | |
55 | """ |
|
55 | """ | |
56 | >>> _encodedir(b'data/foo.i') |
|
56 | >>> _encodedir(b'data/foo.i') | |
57 | 'data/foo.i' |
|
57 | 'data/foo.i' | |
58 | >>> _encodedir(b'data/foo.i/bla.i') |
|
58 | >>> _encodedir(b'data/foo.i/bla.i') | |
59 | 'data/foo.i.hg/bla.i' |
|
59 | 'data/foo.i.hg/bla.i' | |
60 | >>> _encodedir(b'data/foo.i.hg/bla.i') |
|
60 | >>> _encodedir(b'data/foo.i.hg/bla.i') | |
61 | 'data/foo.i.hg.hg/bla.i' |
|
61 | 'data/foo.i.hg.hg/bla.i' | |
62 | >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n') |
|
62 | >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n') | |
63 | 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n' |
|
63 | 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n' | |
64 | """ |
|
64 | """ | |
65 | return ( |
|
65 | return ( | |
66 | path.replace(b".hg/", b".hg.hg/") |
|
66 | path.replace(b".hg/", b".hg.hg/") | |
67 | .replace(b".i/", b".i.hg/") |
|
67 | .replace(b".i/", b".i.hg/") | |
68 | .replace(b".d/", b".d.hg/") |
|
68 | .replace(b".d/", b".d.hg/") | |
69 | ) |
|
69 | ) | |
70 |
|
70 | |||
71 |
|
71 | |||
72 | encodedir = getattr(parsers, 'encodedir', _encodedir) |
|
72 | encodedir = getattr(parsers, 'encodedir', _encodedir) | |
73 |
|
73 | |||
74 |
|
74 | |||
75 | def decodedir(path): |
|
75 | def decodedir(path): | |
76 | """ |
|
76 | """ | |
77 | >>> decodedir(b'data/foo.i') |
|
77 | >>> decodedir(b'data/foo.i') | |
78 | 'data/foo.i' |
|
78 | 'data/foo.i' | |
79 | >>> decodedir(b'data/foo.i.hg/bla.i') |
|
79 | >>> decodedir(b'data/foo.i.hg/bla.i') | |
80 | 'data/foo.i/bla.i' |
|
80 | 'data/foo.i/bla.i' | |
81 | >>> decodedir(b'data/foo.i.hg.hg/bla.i') |
|
81 | >>> decodedir(b'data/foo.i.hg.hg/bla.i') | |
82 | 'data/foo.i.hg/bla.i' |
|
82 | 'data/foo.i.hg/bla.i' | |
83 | """ |
|
83 | """ | |
84 | if b".hg/" not in path: |
|
84 | if b".hg/" not in path: | |
85 | return path |
|
85 | return path | |
86 | return ( |
|
86 | return ( | |
87 | path.replace(b".d.hg/", b".d/") |
|
87 | path.replace(b".d.hg/", b".d/") | |
88 | .replace(b".i.hg/", b".i/") |
|
88 | .replace(b".i.hg/", b".i/") | |
89 | .replace(b".hg.hg/", b".hg/") |
|
89 | .replace(b".hg.hg/", b".hg/") | |
90 | ) |
|
90 | ) | |
91 |
|
91 | |||
92 |
|
92 | |||
93 | def _reserved(): |
|
93 | def _reserved(): | |
94 | """characters that are problematic for filesystems |
|
94 | """characters that are problematic for filesystems | |
95 |
|
95 | |||
96 | * ascii escapes (0..31) |
|
96 | * ascii escapes (0..31) | |
97 | * ascii hi (126..255) |
|
97 | * ascii hi (126..255) | |
98 | * windows specials |
|
98 | * windows specials | |
99 |
|
99 | |||
100 | these characters will be escaped by the encode functions |
|
100 | these characters will be escaped by the encode functions | |
101 | """ |
|
101 | """ | |
102 | winreserved = [ord(x) for x in u'\\:*?"<>|'] |
|
102 | winreserved = [ord(x) for x in u'\\:*?"<>|'] | |
103 | for x in range(32): |
|
103 | for x in range(32): | |
104 | yield x |
|
104 | yield x | |
105 | for x in range(126, 256): |
|
105 | for x in range(126, 256): | |
106 | yield x |
|
106 | yield x | |
107 | for x in winreserved: |
|
107 | for x in winreserved: | |
108 | yield x |
|
108 | yield x | |
109 |
|
109 | |||
110 |
|
110 | |||
111 | def _buildencodefun(): |
|
111 | def _buildencodefun(): | |
112 | """ |
|
112 | """ | |
113 | >>> enc, dec = _buildencodefun() |
|
113 | >>> enc, dec = _buildencodefun() | |
114 |
|
114 | |||
115 | >>> enc(b'nothing/special.txt') |
|
115 | >>> enc(b'nothing/special.txt') | |
116 | 'nothing/special.txt' |
|
116 | 'nothing/special.txt' | |
117 | >>> dec(b'nothing/special.txt') |
|
117 | >>> dec(b'nothing/special.txt') | |
118 | 'nothing/special.txt' |
|
118 | 'nothing/special.txt' | |
119 |
|
119 | |||
120 | >>> enc(b'HELLO') |
|
120 | >>> enc(b'HELLO') | |
121 | '_h_e_l_l_o' |
|
121 | '_h_e_l_l_o' | |
122 | >>> dec(b'_h_e_l_l_o') |
|
122 | >>> dec(b'_h_e_l_l_o') | |
123 | 'HELLO' |
|
123 | 'HELLO' | |
124 |
|
124 | |||
125 | >>> enc(b'hello:world?') |
|
125 | >>> enc(b'hello:world?') | |
126 | 'hello~3aworld~3f' |
|
126 | 'hello~3aworld~3f' | |
127 | >>> dec(b'hello~3aworld~3f') |
|
127 | >>> dec(b'hello~3aworld~3f') | |
128 | 'hello:world?' |
|
128 | 'hello:world?' | |
129 |
|
129 | |||
130 | >>> enc(b'the\\x07quick\\xADshot') |
|
130 | >>> enc(b'the\\x07quick\\xADshot') | |
131 | 'the~07quick~adshot' |
|
131 | 'the~07quick~adshot' | |
132 | >>> dec(b'the~07quick~adshot') |
|
132 | >>> dec(b'the~07quick~adshot') | |
133 | 'the\\x07quick\\xadshot' |
|
133 | 'the\\x07quick\\xadshot' | |
134 | """ |
|
134 | """ | |
135 | e = b'_' |
|
135 | e = b'_' | |
136 | xchr = pycompat.bytechr |
|
136 | xchr = pycompat.bytechr | |
137 | asciistr = list(map(xchr, range(127))) |
|
137 | asciistr = list(map(xchr, range(127))) | |
138 | capitals = list(range(ord(b"A"), ord(b"Z") + 1)) |
|
138 | capitals = list(range(ord(b"A"), ord(b"Z") + 1)) | |
139 |
|
139 | |||
140 | cmap = {x: x for x in asciistr} |
|
140 | cmap = {x: x for x in asciistr} | |
141 | for x in _reserved(): |
|
141 | for x in _reserved(): | |
142 | cmap[xchr(x)] = b"~%02x" % x |
|
142 | cmap[xchr(x)] = b"~%02x" % x | |
143 | for x in capitals + [ord(e)]: |
|
143 | for x in capitals + [ord(e)]: | |
144 | cmap[xchr(x)] = e + xchr(x).lower() |
|
144 | cmap[xchr(x)] = e + xchr(x).lower() | |
145 |
|
145 | |||
146 | dmap = {} |
|
146 | dmap = {} | |
147 | for k, v in pycompat.iteritems(cmap): |
|
147 | for k, v in pycompat.iteritems(cmap): | |
148 | dmap[v] = k |
|
148 | dmap[v] = k | |
149 |
|
149 | |||
150 | def decode(s): |
|
150 | def decode(s): | |
151 | i = 0 |
|
151 | i = 0 | |
152 | while i < len(s): |
|
152 | while i < len(s): | |
153 | for l in pycompat.xrange(1, 4): |
|
153 | for l in pycompat.xrange(1, 4): | |
154 | try: |
|
154 | try: | |
155 | yield dmap[s[i : i + l]] |
|
155 | yield dmap[s[i : i + l]] | |
156 | i += l |
|
156 | i += l | |
157 | break |
|
157 | break | |
158 | except KeyError: |
|
158 | except KeyError: | |
159 | pass |
|
159 | pass | |
160 | else: |
|
160 | else: | |
161 | raise KeyError |
|
161 | raise KeyError | |
162 |
|
162 | |||
163 | return ( |
|
163 | return ( | |
164 | lambda s: b''.join( |
|
164 | lambda s: b''.join( | |
165 | [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))] |
|
165 | [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))] | |
166 | ), |
|
166 | ), | |
167 | lambda s: b''.join(list(decode(s))), |
|
167 | lambda s: b''.join(list(decode(s))), | |
168 | ) |
|
168 | ) | |
169 |
|
169 | |||
170 |
|
170 | |||
171 | _encodefname, _decodefname = _buildencodefun() |
|
171 | _encodefname, _decodefname = _buildencodefun() | |
172 |
|
172 | |||
173 |
|
173 | |||
174 | def encodefilename(s): |
|
174 | def encodefilename(s): | |
175 | """ |
|
175 | """ | |
176 | >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO') |
|
176 | >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO') | |
177 | 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o' |
|
177 | 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o' | |
178 | """ |
|
178 | """ | |
179 | return _encodefname(encodedir(s)) |
|
179 | return _encodefname(encodedir(s)) | |
180 |
|
180 | |||
181 |
|
181 | |||
182 | def decodefilename(s): |
|
182 | def decodefilename(s): | |
183 | """ |
|
183 | """ | |
184 | >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o') |
|
184 | >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o') | |
185 | 'foo.i/bar.d/bla.hg/hi:world?/HELLO' |
|
185 | 'foo.i/bar.d/bla.hg/hi:world?/HELLO' | |
186 | """ |
|
186 | """ | |
187 | return decodedir(_decodefname(s)) |
|
187 | return decodedir(_decodefname(s)) | |
188 |
|
188 | |||
189 |
|
189 | |||
190 | def _buildlowerencodefun(): |
|
190 | def _buildlowerencodefun(): | |
191 | """ |
|
191 | """ | |
192 | >>> f = _buildlowerencodefun() |
|
192 | >>> f = _buildlowerencodefun() | |
193 | >>> f(b'nothing/special.txt') |
|
193 | >>> f(b'nothing/special.txt') | |
194 | 'nothing/special.txt' |
|
194 | 'nothing/special.txt' | |
195 | >>> f(b'HELLO') |
|
195 | >>> f(b'HELLO') | |
196 | 'hello' |
|
196 | 'hello' | |
197 | >>> f(b'hello:world?') |
|
197 | >>> f(b'hello:world?') | |
198 | 'hello~3aworld~3f' |
|
198 | 'hello~3aworld~3f' | |
199 | >>> f(b'the\\x07quick\\xADshot') |
|
199 | >>> f(b'the\\x07quick\\xADshot') | |
200 | 'the~07quick~adshot' |
|
200 | 'the~07quick~adshot' | |
201 | """ |
|
201 | """ | |
202 | xchr = pycompat.bytechr |
|
202 | xchr = pycompat.bytechr | |
203 | cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)} |
|
203 | cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)} | |
204 | for x in _reserved(): |
|
204 | for x in _reserved(): | |
205 | cmap[xchr(x)] = b"~%02x" % x |
|
205 | cmap[xchr(x)] = b"~%02x" % x | |
206 | for x in range(ord(b"A"), ord(b"Z") + 1): |
|
206 | for x in range(ord(b"A"), ord(b"Z") + 1): | |
207 | cmap[xchr(x)] = xchr(x).lower() |
|
207 | cmap[xchr(x)] = xchr(x).lower() | |
208 |
|
208 | |||
209 | def lowerencode(s): |
|
209 | def lowerencode(s): | |
210 | return b"".join([cmap[c] for c in pycompat.iterbytestr(s)]) |
|
210 | return b"".join([cmap[c] for c in pycompat.iterbytestr(s)]) | |
211 |
|
211 | |||
212 | return lowerencode |
|
212 | return lowerencode | |
213 |
|
213 | |||
214 |
|
214 | |||
215 | lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun() |
|
215 | lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun() | |
216 |
|
216 | |||
217 | # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9 |
|
217 | # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9 | |
218 | _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3 |
|
218 | _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3 | |
219 | _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9) |
|
219 | _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9) | |
220 |
|
220 | |||
221 |
|
221 | |||
222 | def _auxencode(path, dotencode): |
|
222 | def _auxencode(path, dotencode): | |
223 | """ |
|
223 | """ | |
224 | Encodes filenames containing names reserved by Windows or which end in |
|
224 | Encodes filenames containing names reserved by Windows or which end in | |
225 | period or space. Does not touch other single reserved characters c. |
|
225 | period or space. Does not touch other single reserved characters c. | |
226 | Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here. |
|
226 | Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here. | |
227 | Additionally encodes space or period at the beginning, if dotencode is |
|
227 | Additionally encodes space or period at the beginning, if dotencode is | |
228 | True. Parameter path is assumed to be all lowercase. |
|
228 | True. Parameter path is assumed to be all lowercase. | |
229 | A segment only needs encoding if a reserved name appears as a |
|
229 | A segment only needs encoding if a reserved name appears as a | |
230 | basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux" |
|
230 | basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux" | |
231 | doesn't need encoding. |
|
231 | doesn't need encoding. | |
232 |
|
232 | |||
233 | >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.' |
|
233 | >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.' | |
234 | >>> _auxencode(s.split(b'/'), True) |
|
234 | >>> _auxencode(s.split(b'/'), True) | |
235 | ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e'] |
|
235 | ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e'] | |
236 | >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.' |
|
236 | >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.' | |
237 | >>> _auxencode(s.split(b'/'), False) |
|
237 | >>> _auxencode(s.split(b'/'), False) | |
238 | ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e'] |
|
238 | ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e'] | |
239 | >>> _auxencode([b'foo. '], True) |
|
239 | >>> _auxencode([b'foo. '], True) | |
240 | ['foo.~20'] |
|
240 | ['foo.~20'] | |
241 | >>> _auxencode([b' .foo'], True) |
|
241 | >>> _auxencode([b' .foo'], True) | |
242 | ['~20.foo'] |
|
242 | ['~20.foo'] | |
243 | """ |
|
243 | """ | |
244 | for i, n in enumerate(path): |
|
244 | for i, n in enumerate(path): | |
245 | if not n: |
|
245 | if not n: | |
246 | continue |
|
246 | continue | |
247 | if dotencode and n[0] in b'. ': |
|
247 | if dotencode and n[0] in b'. ': | |
248 | n = b"~%02x" % ord(n[0:1]) + n[1:] |
|
248 | n = b"~%02x" % ord(n[0:1]) + n[1:] | |
249 | path[i] = n |
|
249 | path[i] = n | |
250 | else: |
|
250 | else: | |
251 | l = n.find(b'.') |
|
251 | l = n.find(b'.') | |
252 | if l == -1: |
|
252 | if l == -1: | |
253 | l = len(n) |
|
253 | l = len(n) | |
254 | if (l == 3 and n[:3] in _winres3) or ( |
|
254 | if (l == 3 and n[:3] in _winres3) or ( | |
255 | l == 4 |
|
255 | l == 4 | |
256 | and n[3:4] <= b'9' |
|
256 | and n[3:4] <= b'9' | |
257 | and n[3:4] >= b'1' |
|
257 | and n[3:4] >= b'1' | |
258 | and n[:3] in _winres4 |
|
258 | and n[:3] in _winres4 | |
259 | ): |
|
259 | ): | |
260 | # encode third letter ('aux' -> 'au~78') |
|
260 | # encode third letter ('aux' -> 'au~78') | |
261 | ec = b"~%02x" % ord(n[2:3]) |
|
261 | ec = b"~%02x" % ord(n[2:3]) | |
262 | n = n[0:2] + ec + n[3:] |
|
262 | n = n[0:2] + ec + n[3:] | |
263 | path[i] = n |
|
263 | path[i] = n | |
264 | if n[-1] in b'. ': |
|
264 | if n[-1] in b'. ': | |
265 | # encode last period or space ('foo...' -> 'foo..~2e') |
|
265 | # encode last period or space ('foo...' -> 'foo..~2e') | |
266 | path[i] = n[:-1] + b"~%02x" % ord(n[-1:]) |
|
266 | path[i] = n[:-1] + b"~%02x" % ord(n[-1:]) | |
267 | return path |
|
267 | return path | |
268 |
|
268 | |||
269 |
|
269 | |||
270 | _maxstorepathlen = 120 |
|
270 | _maxstorepathlen = 120 | |
271 | _dirprefixlen = 8 |
|
271 | _dirprefixlen = 8 | |
272 | _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4 |
|
272 | _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4 | |
273 |
|
273 | |||
274 |
|
274 | |||
275 | def _hashencode(path, dotencode): |
|
275 | def _hashencode(path, dotencode): | |
276 | digest = hex(hashutil.sha1(path).digest()) |
|
276 | digest = hex(hashutil.sha1(path).digest()) | |
277 | le = lowerencode(path[5:]).split(b'/') # skips prefix 'data/' or 'meta/' |
|
277 | le = lowerencode(path[5:]).split(b'/') # skips prefix 'data/' or 'meta/' | |
278 | parts = _auxencode(le, dotencode) |
|
278 | parts = _auxencode(le, dotencode) | |
279 | basename = parts[-1] |
|
279 | basename = parts[-1] | |
280 | _root, ext = os.path.splitext(basename) |
|
280 | _root, ext = os.path.splitext(basename) | |
281 | sdirs = [] |
|
281 | sdirs = [] | |
282 | sdirslen = 0 |
|
282 | sdirslen = 0 | |
283 | for p in parts[:-1]: |
|
283 | for p in parts[:-1]: | |
284 | d = p[:_dirprefixlen] |
|
284 | d = p[:_dirprefixlen] | |
285 | if d[-1] in b'. ': |
|
285 | if d[-1] in b'. ': | |
286 | # Windows can't access dirs ending in period or space |
|
286 | # Windows can't access dirs ending in period or space | |
287 | d = d[:-1] + b'_' |
|
287 | d = d[:-1] + b'_' | |
288 | if sdirslen == 0: |
|
288 | if sdirslen == 0: | |
289 | t = len(d) |
|
289 | t = len(d) | |
290 | else: |
|
290 | else: | |
291 | t = sdirslen + 1 + len(d) |
|
291 | t = sdirslen + 1 + len(d) | |
292 | if t > _maxshortdirslen: |
|
292 | if t > _maxshortdirslen: | |
293 | break |
|
293 | break | |
294 | sdirs.append(d) |
|
294 | sdirs.append(d) | |
295 | sdirslen = t |
|
295 | sdirslen = t | |
296 | dirs = b'/'.join(sdirs) |
|
296 | dirs = b'/'.join(sdirs) | |
297 | if len(dirs) > 0: |
|
297 | if len(dirs) > 0: | |
298 | dirs += b'/' |
|
298 | dirs += b'/' | |
299 | res = b'dh/' + dirs + digest + ext |
|
299 | res = b'dh/' + dirs + digest + ext | |
300 | spaceleft = _maxstorepathlen - len(res) |
|
300 | spaceleft = _maxstorepathlen - len(res) | |
301 | if spaceleft > 0: |
|
301 | if spaceleft > 0: | |
302 | filler = basename[:spaceleft] |
|
302 | filler = basename[:spaceleft] | |
303 | res = b'dh/' + dirs + filler + digest + ext |
|
303 | res = b'dh/' + dirs + filler + digest + ext | |
304 | return res |
|
304 | return res | |
305 |
|
305 | |||
306 |
|
306 | |||
307 | def _hybridencode(path, dotencode): |
|
307 | def _hybridencode(path, dotencode): | |
308 | """encodes path with a length limit |
|
308 | """encodes path with a length limit | |
309 |
|
309 | |||
310 | Encodes all paths that begin with 'data/', according to the following. |
|
310 | Encodes all paths that begin with 'data/', according to the following. | |
311 |
|
311 | |||
312 | Default encoding (reversible): |
|
312 | Default encoding (reversible): | |
313 |
|
313 | |||
314 | Encodes all uppercase letters 'X' as '_x'. All reserved or illegal |
|
314 | Encodes all uppercase letters 'X' as '_x'. All reserved or illegal | |
315 | characters are encoded as '~xx', where xx is the two digit hex code |
|
315 | characters are encoded as '~xx', where xx is the two digit hex code | |
316 | of the character (see encodefilename). |
|
316 | of the character (see encodefilename). | |
317 | Relevant path components consisting of Windows reserved filenames are |
|
317 | Relevant path components consisting of Windows reserved filenames are | |
318 | masked by encoding the third character ('aux' -> 'au~78', see _auxencode). |
|
318 | masked by encoding the third character ('aux' -> 'au~78', see _auxencode). | |
319 |
|
319 | |||
320 | Hashed encoding (not reversible): |
|
320 | Hashed encoding (not reversible): | |
321 |
|
321 | |||
322 | If the default-encoded path is longer than _maxstorepathlen, a |
|
322 | If the default-encoded path is longer than _maxstorepathlen, a | |
323 | non-reversible hybrid hashing of the path is done instead. |
|
323 | non-reversible hybrid hashing of the path is done instead. | |
324 | This encoding uses up to _dirprefixlen characters of all directory |
|
324 | This encoding uses up to _dirprefixlen characters of all directory | |
325 | levels of the lowerencoded path, but not more levels than can fit into |
|
325 | levels of the lowerencoded path, but not more levels than can fit into | |
326 | _maxshortdirslen. |
|
326 | _maxshortdirslen. | |
327 | Then follows the filler followed by the sha digest of the full path. |
|
327 | Then follows the filler followed by the sha digest of the full path. | |
328 | The filler is the beginning of the basename of the lowerencoded path |
|
328 | The filler is the beginning of the basename of the lowerencoded path | |
329 | (the basename is everything after the last path separator). The filler |
|
329 | (the basename is everything after the last path separator). The filler | |
330 | is as long as possible, filling in characters from the basename until |
|
330 | is as long as possible, filling in characters from the basename until | |
331 | the encoded path has _maxstorepathlen characters (or all chars of the |
|
331 | the encoded path has _maxstorepathlen characters (or all chars of the | |
332 | basename have been taken). |
|
332 | basename have been taken). | |
333 | The extension (e.g. '.i' or '.d') is preserved. |
|
333 | The extension (e.g. '.i' or '.d') is preserved. | |
334 |
|
334 | |||
335 | The string 'data/' at the beginning is replaced with 'dh/', if the hashed |
|
335 | The string 'data/' at the beginning is replaced with 'dh/', if the hashed | |
336 | encoding was used. |
|
336 | encoding was used. | |
337 | """ |
|
337 | """ | |
338 | path = encodedir(path) |
|
338 | path = encodedir(path) | |
339 | ef = _encodefname(path).split(b'/') |
|
339 | ef = _encodefname(path).split(b'/') | |
340 | res = b'/'.join(_auxencode(ef, dotencode)) |
|
340 | res = b'/'.join(_auxencode(ef, dotencode)) | |
341 | if len(res) > _maxstorepathlen: |
|
341 | if len(res) > _maxstorepathlen: | |
342 | res = _hashencode(path, dotencode) |
|
342 | res = _hashencode(path, dotencode) | |
343 | return res |
|
343 | return res | |
344 |
|
344 | |||
345 |
|
345 | |||
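To see both regimes of _hybridencode() in action, a short sketch using the private helpers defined above (assumes Mercurial is importable; the sample paths are invented):

    from mercurial import store

    # short paths keep the reversible encoding
    print(store._hybridencode(b'data/Foo/BAR.txt.i', True))  # b'data/_foo/_b_a_r.txt.i'

    # once the encoded form would exceed _maxstorepathlen, the hashed form is used
    long_path = b'data/' + b'x' * 200 + b'.i'
    hashed = store._hybridencode(long_path, True)
    assert hashed.startswith(b'dh/')              # 'data/' replaced by 'dh/'
    assert len(hashed) <= store._maxstorepathlen  # capped at 120 bytes
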
346 | def _pathencode(path): |
|
346 | def _pathencode(path): | |
347 | de = encodedir(path) |
|
347 | de = encodedir(path) | |
348 | if len(path) > _maxstorepathlen: |
|
348 | if len(path) > _maxstorepathlen: | |
349 | return _hashencode(de, True) |
|
349 | return _hashencode(de, True) | |
350 | ef = _encodefname(de).split(b'/') |
|
350 | ef = _encodefname(de).split(b'/') | |
351 | res = b'/'.join(_auxencode(ef, True)) |
|
351 | res = b'/'.join(_auxencode(ef, True)) | |
352 | if len(res) > _maxstorepathlen: |
|
352 | if len(res) > _maxstorepathlen: | |
353 | return _hashencode(de, True) |
|
353 | return _hashencode(de, True) | |
354 | return res |
|
354 | return res | |
355 |
|
355 | |||
356 |
|
356 | |||
357 | _pathencode = getattr(parsers, 'pathencode', _pathencode) |
|
357 | _pathencode = getattr(parsers, 'pathencode', _pathencode) | |
358 |
|
358 | |||
359 |
|
359 | |||
360 | def _plainhybridencode(f): |
|
360 | def _plainhybridencode(f): | |
361 | return _hybridencode(f, False) |
|
361 | return _hybridencode(f, False) | |
362 |
|
362 | |||
363 |
|
363 | |||
364 | def _calcmode(vfs): |
|
364 | def _calcmode(vfs): | |
365 | try: |
|
365 | try: | |
366 | # files in .hg/ will be created using this mode |
|
366 | # files in .hg/ will be created using this mode | |
367 | mode = vfs.stat().st_mode |
|
367 | mode = vfs.stat().st_mode | |
368 | # avoid some useless chmods |
|
368 | # avoid some useless chmods | |
369 | if (0o777 & ~util.umask) == (0o777 & mode): |
|
369 | if (0o777 & ~util.umask) == (0o777 & mode): | |
370 | mode = None |
|
370 | mode = None | |
371 | except OSError: |
|
371 | except OSError: | |
372 | mode = None |
|
372 | mode = None | |
373 | return mode |
|
373 | return mode | |
374 |
|
374 | |||
375 |
|
375 | |||
376 | _data = [ |
|
376 | _data = [ | |
377 | b'bookmarks', |
|
377 | b'bookmarks', | |
378 | b'narrowspec', |
|
378 | b'narrowspec', | |
379 | b'data', |
|
379 | b'data', | |
380 | b'meta', |
|
380 | b'meta', | |
381 | b'00manifest.d', |
|
381 | b'00manifest.d', | |
382 | b'00manifest.i', |
|
382 | b'00manifest.i', | |
383 | b'00changelog.d', |
|
383 | b'00changelog.d', | |
384 | b'00changelog.i', |
|
384 | b'00changelog.i', | |
385 | b'phaseroots', |
|
385 | b'phaseroots', | |
386 | b'obsstore', |
|
386 | b'obsstore', | |
387 | b'requires', |
|
387 | b'requires', | |
388 | ] |
|
388 | ] | |
389 |
|
389 | |||
390 | REVLOG_FILES_EXT = (b'.i', b'.d', b'.n', b'.nd', b'.tmpcensored') |
|
390 | REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored') | |
|
391 | REVLOG_FILES_OTHER_EXT = (b'.d', b'.n', b'.nd', b'd.tmpcensored') | |||
|
392 | ||||
|
393 | ||||
|
394 | def is_revlog(f, kind, st): | |||
|
395 | if kind != stat.S_IFREG: | |||
|
396 | return None | |||
|
397 | return revlog_type(f) | |||
|
398 | ||||
|
399 | ||||
|
400 | def revlog_type(f): | |||
|
401 | if f.endswith(REVLOG_FILES_MAIN_EXT): | |||
|
402 | return FILEFLAGS_REVLOG_MAIN | |||
|
403 | elif f.endswith(REVLOG_FILES_OTHER_EXT): | |||
|
404 | return FILEFLAGS_REVLOG_OTHER |||
391 |
|
405 | |||
392 |
|
406 | |||
393 | def isrevlog(f, kind, st): |
|
407 | # the file is part of changelog data | |
394 | if kind != stat.S_IFREG: |
|
408 | FILEFLAGS_CHANGELOG = 1 << 13 | |
395 | return False |
|
409 | # the file is part of manifest data | |
396 | return f.endswith(REVLOG_FILES_EXT) |
|
410 | FILEFLAGS_MANIFESTLOG = 1 << 12 | |
|
411 | # the file is part of filelog data | |||
|
412 | FILEFLAGS_FILELOG = 1 << 11 | |||
|
413 | # files that are not directly part of a revlog |||
|
414 | FILEFLAGS_OTHER = 1 << 10 | |||
|
415 | ||||
|
416 | # the main entry point for a revlog | |||
|
417 | FILEFLAGS_REVLOG_MAIN = 1 << 1 | |||
|
418 | # a secondary file for a revlog | |||
|
419 | FILEFLAGS_REVLOG_OTHER = 1 << 0 | |||
|
420 | ||||
|
421 | FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN | |||
|
422 | FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER | |||
|
423 | FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN | |||
|
424 | FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER | |||
|
425 | FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN | |||
|
426 | FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER | |||
|
427 | FILETYPE_OTHER = FILEFLAGS_OTHER | |||
397 |
|
428 | |||
398 |
|
429 | |||
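The new constants form a small bit-field: the high bits record which log a file belongs to (changelog, manifest, filelog, or other) and the two low bits whether it is the main `.i` entry or a secondary file such as `.d`. A tiny standalone illustration of how a composite value decomposes, mirroring the definitions above:

    FILEFLAGS_CHANGELOG = 1 << 13
    FILEFLAGS_REVLOG_MAIN = 1 << 1
    FILEFLAGS_REVLOG_OTHER = 1 << 0

    FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN

    # masking recovers each half of the composite value
    assert FILETYPE_CHANGELOG_MAIN & FILEFLAGS_CHANGELOG
    assert FILETYPE_CHANGELOG_MAIN & FILEFLAGS_REVLOG_MAIN
    assert not (FILETYPE_CHANGELOG_MAIN & FILEFLAGS_REVLOG_OTHER)
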
399 | class basicstore(object): |
|
430 | class basicstore(object): | |
400 | '''base class for local repository stores''' |
|
431 | '''base class for local repository stores''' | |
401 |
|
432 | |||
402 | def __init__(self, path, vfstype): |
|
433 | def __init__(self, path, vfstype): | |
403 | vfs = vfstype(path) |
|
434 | vfs = vfstype(path) | |
404 | self.path = vfs.base |
|
435 | self.path = vfs.base | |
405 | self.createmode = _calcmode(vfs) |
|
436 | self.createmode = _calcmode(vfs) | |
406 | vfs.createmode = self.createmode |
|
437 | vfs.createmode = self.createmode | |
407 | self.rawvfs = vfs |
|
438 | self.rawvfs = vfs | |
408 | self.vfs = vfsmod.filtervfs(vfs, encodedir) |
|
439 | self.vfs = vfsmod.filtervfs(vfs, encodedir) | |
409 | self.opener = self.vfs |
|
440 | self.opener = self.vfs | |
410 |
|
441 | |||
411 | def join(self, f): |
|
442 | def join(self, f): | |
412 | return self.path + b'/' + encodedir(f) |
|
443 | return self.path + b'/' + encodedir(f) | |
413 |
|
444 | |||
414 | def _walk(self, relpath, recurse): |
|
445 | def _walk(self, relpath, recurse): | |
415 | '''yields (unencoded, encoded, size)''' |
|
446 | '''yields (rl_type, unencoded, encoded, size)''' | |
416 | path = self.path |
|
447 | path = self.path | |
417 | if relpath: |
|
448 | if relpath: | |
418 | path += b'/' + relpath |
|
449 | path += b'/' + relpath | |
419 | striplen = len(self.path) + 1 |
|
450 | striplen = len(self.path) + 1 | |
420 | l = [] |
|
451 | l = [] | |
421 | if self.rawvfs.isdir(path): |
|
452 | if self.rawvfs.isdir(path): | |
422 | visit = [path] |
|
453 | visit = [path] | |
423 | readdir = self.rawvfs.readdir |
|
454 | readdir = self.rawvfs.readdir | |
424 | while visit: |
|
455 | while visit: | |
425 | p = visit.pop() |
|
456 | p = visit.pop() | |
426 | for f, kind, st in readdir(p, stat=True): |
|
457 | for f, kind, st in readdir(p, stat=True): | |
427 | fp = p + b'/' + f |
|
458 | fp = p + b'/' + f | |
428 | if isrevlog(f, kind, st): |
|
|
459 | rl_type = is_revlog(f, kind, st) | |
|
460 | if rl_type is not None: | |||
429 | n = util.pconvert(fp[striplen:]) |
|
461 | n = util.pconvert(fp[striplen:]) | |
430 | l.append((decodedir(n), n, st.st_size)) |
|
462 | l.append((rl_type, decodedir(n), n, st.st_size)) | |
431 | elif kind == stat.S_IFDIR and recurse: |
|
463 | elif kind == stat.S_IFDIR and recurse: | |
432 | visit.append(fp) |
|
464 | visit.append(fp) | |
433 | l.sort() |
|
465 | l.sort() | |
434 | return l |
|
466 | return l | |
435 |
|
467 | |||
436 | def changelog(self, trypending, concurrencychecker=None): |
|
468 | def changelog(self, trypending, concurrencychecker=None): | |
437 | return changelog.changelog( |
|
469 | return changelog.changelog( | |
438 | self.vfs, |
|
470 | self.vfs, | |
439 | trypending=trypending, |
|
471 | trypending=trypending, | |
440 | concurrencychecker=concurrencychecker, |
|
472 | concurrencychecker=concurrencychecker, | |
441 | ) |
|
473 | ) | |
442 |
|
474 | |||
443 | def manifestlog(self, repo, storenarrowmatch): |
|
475 | def manifestlog(self, repo, storenarrowmatch): | |
444 | rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs) |
|
476 | rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs) | |
445 | return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch) |
|
477 | return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch) | |
446 |
|
478 | |||
447 | def datafiles(self, matcher=None): |
|
479 | def datafiles(self, matcher=None): | |
448 | return self._walk(b'data', True) + self._walk(b'meta', True) |
|
|
480 | files = self._walk(b'data', True) + self._walk(b'meta', True) | |
|
481 | for (t, u, e, s) in files: | |||
|
482 | yield (FILEFLAGS_FILELOG | t, u, e, s) | |||
449 |
|
483 | |||
450 | def topfiles(self): |
|
484 | def topfiles(self): | |
451 | # yield manifest before changelog |
|
485 | # yield manifest before changelog | |
452 | return reversed(self._walk(b'', False)) |
|
|
486 | files = reversed(self._walk(b'', False)) | |
|
487 | for (t, u, e, s) in files: | |||
|
488 | if u.startswith(b'00changelog'): | |||
|
489 | yield (FILEFLAGS_CHANGELOG | t, u, e, s) | |||
|
490 | elif u.startswith(b'00manifest'): | |||
|
491 | yield (FILEFLAGS_MANIFESTLOG | t, u, e, s) | |||
|
492 | else: | |||
|
493 | yield (FILETYPE_OTHER | t, u, e, s) | |||
453 |
|
494 | |||
454 | def walk(self, matcher=None): |
|
495 | def walk(self, matcher=None): | |
455 | """return file related to data storage (ie: revlogs) |
|
496 | """return file related to data storage (ie: revlogs) | |
456 |
|
497 | |||
457 | yields (unencoded, encoded, size) |
|
498 | yields (file_type, unencoded, encoded, size) | |
458 |
|
499 | |||
459 | if a matcher is passed, storage files of only those tracked paths |
|
500 | if a matcher is passed, storage files of only those tracked paths | |
460 | that match the matcher are yielded |
|
501 | that match the matcher are yielded | |
461 | """ |
|
502 | """ | |
462 | # yield data files first |
|
503 | # yield data files first | |
463 | for x in self.datafiles(matcher): |
|
504 | for x in self.datafiles(matcher): | |
464 | yield x |
|
505 | yield x | |
465 | for x in self.topfiles(): |
|
506 | for x in self.topfiles(): | |
466 | yield x |
|
507 | yield x | |
467 |
|
508 | |||
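With walk() now yielding (file_type, unencoded, encoded, size), callers can dispatch on the type flag instead of parsing file names. A hedged sketch of such a consumer — `repo` is assumed to be an existing local repository object, and the flag names are the module-level constants introduced above:

    from mercurial import store

    def count_store_files(repo):
        """Tally store files per category from repo.store.walk() tuples."""
        counts = {b'changelog': 0, b'manifest': 0, b'filelog': 0, b'other': 0}
        for file_type, unencoded, encoded, size in repo.store.walk():
            if file_type & store.FILEFLAGS_CHANGELOG:
                counts[b'changelog'] += 1
            elif file_type & store.FILEFLAGS_MANIFESTLOG:
                counts[b'manifest'] += 1
            elif file_type & store.FILEFLAGS_FILELOG:
                counts[b'filelog'] += 1
            else:
                counts[b'other'] += 1
        return counts
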
468 | def copylist(self): |
|
509 | def copylist(self): | |
469 | return _data |
|
510 | return _data | |
470 |
|
511 | |||
471 | def write(self, tr): |
|
512 | def write(self, tr): | |
472 | pass |
|
513 | pass | |
473 |
|
514 | |||
474 | def invalidatecaches(self): |
|
515 | def invalidatecaches(self): | |
475 | pass |
|
516 | pass | |
476 |
|
517 | |||
477 | def markremoved(self, fn): |
|
518 | def markremoved(self, fn): | |
478 | pass |
|
519 | pass | |
479 |
|
520 | |||
480 | def __contains__(self, path): |
|
521 | def __contains__(self, path): | |
481 | '''Checks if the store contains path''' |
|
522 | '''Checks if the store contains path''' | |
482 | path = b"/".join((b"data", path)) |
|
523 | path = b"/".join((b"data", path)) | |
483 | # file? |
|
524 | # file? | |
484 | if self.vfs.exists(path + b".i"): |
|
525 | if self.vfs.exists(path + b".i"): | |
485 | return True |
|
526 | return True | |
486 | # dir? |
|
527 | # dir? | |
487 | if not path.endswith(b"/"): |
|
528 | if not path.endswith(b"/"): | |
488 | path = path + b"/" |
|
529 | path = path + b"/" | |
489 | return self.vfs.exists(path) |
|
530 | return self.vfs.exists(path) | |
490 |
|
531 | |||
491 |
|
532 | |||
492 | class encodedstore(basicstore): |
|
533 | class encodedstore(basicstore): | |
493 | def __init__(self, path, vfstype): |
|
534 | def __init__(self, path, vfstype): | |
494 | vfs = vfstype(path + b'/store') |
|
535 | vfs = vfstype(path + b'/store') | |
495 | self.path = vfs.base |
|
536 | self.path = vfs.base | |
496 | self.createmode = _calcmode(vfs) |
|
537 | self.createmode = _calcmode(vfs) | |
497 | vfs.createmode = self.createmode |
|
538 | vfs.createmode = self.createmode | |
498 | self.rawvfs = vfs |
|
539 | self.rawvfs = vfs | |
499 | self.vfs = vfsmod.filtervfs(vfs, encodefilename) |
|
540 | self.vfs = vfsmod.filtervfs(vfs, encodefilename) | |
500 | self.opener = self.vfs |
|
541 | self.opener = self.vfs | |
501 |
|
542 | |||
502 | def datafiles(self, matcher=None): |
|
543 | def datafiles(self, matcher=None): | |
503 | for a, b, size in super(encodedstore, self).datafiles(): |
|
544 | for t, a, b, size in super(encodedstore, self).datafiles(): | |
504 | try: |
|
545 | try: | |
505 | a = decodefilename(a) |
|
546 | a = decodefilename(a) | |
506 | except KeyError: |
|
547 | except KeyError: | |
507 | a = None |
|
548 | a = None | |
508 | if a is not None and not _matchtrackedpath(a, matcher): |
|
549 | if a is not None and not _matchtrackedpath(a, matcher): | |
509 | continue |
|
550 | continue | |
510 | yield a, b, size |
|
551 | yield t, a, b, size | |
511 |
|
552 | |||
512 | def join(self, f): |
|
553 | def join(self, f): | |
513 | return self.path + b'/' + encodefilename(f) |
|
554 | return self.path + b'/' + encodefilename(f) | |
514 |
|
555 | |||
515 | def copylist(self): |
|
556 | def copylist(self): | |
516 | return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data] |
|
557 | return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data] | |
517 |
|
558 | |||
518 |
|
559 | |||
519 | class fncache(object): |
|
560 | class fncache(object): | |
520 | # the filename used to be partially encoded |
|
561 | # the filename used to be partially encoded | |
521 | # hence the encodedir/decodedir dance |
|
562 | # hence the encodedir/decodedir dance | |
522 | def __init__(self, vfs): |
|
563 | def __init__(self, vfs): | |
523 | self.vfs = vfs |
|
564 | self.vfs = vfs | |
524 | self.entries = None |
|
565 | self.entries = None | |
525 | self._dirty = False |
|
566 | self._dirty = False | |
526 | # set of new additions to fncache |
|
567 | # set of new additions to fncache | |
527 | self.addls = set() |
|
568 | self.addls = set() | |
528 |
|
569 | |||
529 | def ensureloaded(self, warn=None): |
|
570 | def ensureloaded(self, warn=None): | |
530 | """read the fncache file if not already read. |
|
571 | """read the fncache file if not already read. | |
531 |
|
572 | |||
532 | If the file on disk is corrupted, raise. If warn is provided, |
|
573 | If the file on disk is corrupted, raise. If warn is provided, | |
533 | warn and keep going instead.""" |
|
574 | warn and keep going instead.""" | |
534 | if self.entries is None: |
|
575 | if self.entries is None: | |
535 | self._load(warn) |
|
576 | self._load(warn) | |
536 |
|
577 | |||
537 | def _load(self, warn=None): |
|
578 | def _load(self, warn=None): | |
538 | '''fill the entries from the fncache file''' |
|
579 | '''fill the entries from the fncache file''' | |
539 | self._dirty = False |
|
580 | self._dirty = False | |
540 | try: |
|
581 | try: | |
541 | fp = self.vfs(b'fncache', mode=b'rb') |
|
582 | fp = self.vfs(b'fncache', mode=b'rb') | |
542 | except IOError: |
|
583 | except IOError: | |
543 | # skip nonexistent file |
|
584 | # skip nonexistent file | |
544 | self.entries = set() |
|
585 | self.entries = set() | |
545 | return |
|
586 | return | |
546 |
|
587 | |||
547 | self.entries = set() |
|
588 | self.entries = set() | |
548 | chunk = b'' |
|
589 | chunk = b'' | |
549 | for c in iter(functools.partial(fp.read, fncache_chunksize), b''): |
|
590 | for c in iter(functools.partial(fp.read, fncache_chunksize), b''): | |
550 | chunk += c |
|
591 | chunk += c | |
551 | try: |
|
592 | try: | |
552 | p = chunk.rindex(b'\n') |
|
593 | p = chunk.rindex(b'\n') | |
553 | self.entries.update(decodedir(chunk[: p + 1]).splitlines()) |
|
594 | self.entries.update(decodedir(chunk[: p + 1]).splitlines()) | |
554 | chunk = chunk[p + 1 :] |
|
595 | chunk = chunk[p + 1 :] | |
555 | except ValueError: |
|
596 | except ValueError: | |
556 | # substring '\n' not found, maybe the entry is bigger than the |
|
597 | # substring '\n' not found, maybe the entry is bigger than the | |
557 | # chunksize, so let's keep iterating |
|
598 | # chunksize, so let's keep iterating | |
558 | pass |
|
599 | pass | |
559 |
|
600 | |||
560 | if chunk: |
|
601 | if chunk: | |
561 | msg = _(b"fncache does not ends with a newline") |
|
602 | msg = _(b"fncache does not ends with a newline") | |
562 | if warn: |
|
603 | if warn: | |
563 | warn(msg + b'\n') |
|
604 | warn(msg + b'\n') | |
564 | else: |
|
605 | else: | |
565 | raise error.Abort( |
|
606 | raise error.Abort( | |
566 | msg, |
|
607 | msg, | |
567 | hint=_( |
|
608 | hint=_( | |
568 | b"use 'hg debugrebuildfncache' to " |
|
609 | b"use 'hg debugrebuildfncache' to " | |
569 | b"rebuild the fncache" |
|
610 | b"rebuild the fncache" | |
570 | ), |
|
611 | ), | |
571 | ) |
|
612 | ) | |
572 | self._checkentries(fp, warn) |
|
613 | self._checkentries(fp, warn) | |
573 | fp.close() |
|
614 | fp.close() | |
574 |
|
615 | |||
575 | def _checkentries(self, fp, warn): |
|
616 | def _checkentries(self, fp, warn): | |
576 | """ make sure there is no empty string in entries """ |
|
617 | """ make sure there is no empty string in entries """ | |
577 | if b'' in self.entries: |
|
618 | if b'' in self.entries: | |
578 | fp.seek(0) |
|
619 | fp.seek(0) | |
579 | for n, line in enumerate(util.iterfile(fp)): |
|
620 | for n, line in enumerate(util.iterfile(fp)): | |
580 | if not line.rstrip(b'\n'): |
|
621 | if not line.rstrip(b'\n'): | |
581 | t = _(b'invalid entry in fncache, line %d') % (n + 1) |
|
622 | t = _(b'invalid entry in fncache, line %d') % (n + 1) | |
582 | if warn: |
|
623 | if warn: | |
583 | warn(t + b'\n') |
|
624 | warn(t + b'\n') | |
584 | else: |
|
625 | else: | |
585 | raise error.Abort(t) |
|
626 | raise error.Abort(t) | |
586 |
|
627 | |||
587 | def write(self, tr): |
|
628 | def write(self, tr): | |
588 | if self._dirty: |
|
629 | if self._dirty: | |
589 | assert self.entries is not None |
|
630 | assert self.entries is not None | |
590 | self.entries = self.entries | self.addls |
|
631 | self.entries = self.entries | self.addls | |
591 | self.addls = set() |
|
632 | self.addls = set() | |
592 | tr.addbackup(b'fncache') |
|
633 | tr.addbackup(b'fncache') | |
593 | fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True) |
|
634 | fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True) | |
594 | if self.entries: |
|
635 | if self.entries: | |
595 | fp.write(encodedir(b'\n'.join(self.entries) + b'\n')) |
|
636 | fp.write(encodedir(b'\n'.join(self.entries) + b'\n')) | |
596 | fp.close() |
|
637 | fp.close() | |
597 | self._dirty = False |
|
638 | self._dirty = False | |
598 | if self.addls: |
|
639 | if self.addls: | |
599 | # if we have just new entries, let's append them to the fncache |
|
640 | # if we have just new entries, let's append them to the fncache | |
600 | tr.addbackup(b'fncache') |
|
641 | tr.addbackup(b'fncache') | |
601 | fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True) |
|
642 | fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True) | |
602 | if self.addls: |
|
643 | if self.addls: | |
603 | fp.write(encodedir(b'\n'.join(self.addls) + b'\n')) |
|
644 | fp.write(encodedir(b'\n'.join(self.addls) + b'\n')) | |
604 | fp.close() |
|
645 | fp.close() | |
605 | self.entries = None |
|
646 | self.entries = None | |
606 | self.addls = set() |
|
647 | self.addls = set() | |
607 |
|
648 | |||
608 | def add(self, fn): |
|
649 | def add(self, fn): | |
609 | if self.entries is None: |
|
650 | if self.entries is None: | |
610 | self._load() |
|
651 | self._load() | |
611 | if fn not in self.entries: |
|
652 | if fn not in self.entries: | |
612 | self.addls.add(fn) |
|
653 | self.addls.add(fn) | |
613 |
|
654 | |||
614 | def remove(self, fn): |
|
655 | def remove(self, fn): | |
615 | if self.entries is None: |
|
656 | if self.entries is None: | |
616 | self._load() |
|
657 | self._load() | |
617 | if fn in self.addls: |
|
658 | if fn in self.addls: | |
618 | self.addls.remove(fn) |
|
659 | self.addls.remove(fn) | |
619 | return |
|
660 | return | |
620 | try: |
|
661 | try: | |
621 | self.entries.remove(fn) |
|
662 | self.entries.remove(fn) | |
622 | self._dirty = True |
|
663 | self._dirty = True | |
623 | except KeyError: |
|
664 | except KeyError: | |
624 | pass |
|
665 | pass | |
625 |
|
666 | |||
626 | def __contains__(self, fn): |
|
667 | def __contains__(self, fn): | |
627 | if fn in self.addls: |
|
668 | if fn in self.addls: | |
628 | return True |
|
669 | return True | |
629 | if self.entries is None: |
|
670 | if self.entries is None: | |
630 | self._load() |
|
671 | self._load() | |
631 | return fn in self.entries |
|
672 | return fn in self.entries | |
632 |
|
673 | |||
633 | def __iter__(self): |
|
674 | def __iter__(self): | |
634 | if self.entries is None: |
|
675 | if self.entries is None: | |
635 | self._load() |
|
676 | self._load() | |
636 | return iter(self.entries | self.addls) |
|
677 | return iter(self.entries | self.addls) | |
637 |
|
678 | |||
638 |
|
679 | |||
639 | class _fncachevfs(vfsmod.proxyvfs): |
|
680 | class _fncachevfs(vfsmod.proxyvfs): | |
640 | def __init__(self, vfs, fnc, encode): |
|
681 | def __init__(self, vfs, fnc, encode): | |
641 | vfsmod.proxyvfs.__init__(self, vfs) |
|
682 | vfsmod.proxyvfs.__init__(self, vfs) | |
642 | self.fncache = fnc |
|
683 | self.fncache = fnc | |
643 | self.encode = encode |
|
684 | self.encode = encode | |
644 |
|
685 | |||
645 | def __call__(self, path, mode=b'r', *args, **kw): |
|
686 | def __call__(self, path, mode=b'r', *args, **kw): | |
646 | encoded = self.encode(path) |
|
687 | encoded = self.encode(path) | |
647 | if mode not in (b'r', b'rb') and ( |
|
688 | if mode not in (b'r', b'rb') and ( | |
648 | path.startswith(b'data/') or path.startswith(b'meta/') |
|
689 | path.startswith(b'data/') or path.startswith(b'meta/') | |
649 | ): |
|
690 | ): | |
650 | # do not trigger a fncache load when adding a file that already is |
|
691 | # do not trigger a fncache load when adding a file that already is | |
651 | # known to exist. |
|
692 | # known to exist. | |
652 | notload = self.fncache.entries is None and self.vfs.exists(encoded) |
|
693 | notload = self.fncache.entries is None and self.vfs.exists(encoded) | |
653 | if notload and b'a' in mode and not self.vfs.stat(encoded).st_size: |
|
694 | if notload and b'a' in mode and not self.vfs.stat(encoded).st_size: | |
654 | # when appending to an existing file, if the file has size zero, |
|
695 | # when appending to an existing file, if the file has size zero, | |
655 | # it should be considered as missing. Such zero-size files are |
|
696 | # it should be considered as missing. Such zero-size files are | |
656 | # the result of truncation when a transaction is aborted. |
|
697 | # the result of truncation when a transaction is aborted. | |
657 | notload = False |
|
698 | notload = False | |
658 | if not notload: |
|
699 | if not notload: | |
659 | self.fncache.add(path) |
|
700 | self.fncache.add(path) | |
660 | return self.vfs(encoded, mode, *args, **kw) |
|
701 | return self.vfs(encoded, mode, *args, **kw) | |
661 |
|
702 | |||
662 | def join(self, path): |
|
703 | def join(self, path): | |
663 | if path: |
|
704 | if path: | |
664 | return self.vfs.join(self.encode(path)) |
|
705 | return self.vfs.join(self.encode(path)) | |
665 | else: |
|
706 | else: | |
666 | return self.vfs.join(path) |
|
707 | return self.vfs.join(path) | |
667 |
|
708 | |||
668 |
|
709 | |||
669 | class fncachestore(basicstore): |
|
710 | class fncachestore(basicstore): | |
670 | def __init__(self, path, vfstype, dotencode): |
|
711 | def __init__(self, path, vfstype, dotencode): | |
671 | if dotencode: |
|
712 | if dotencode: | |
672 | encode = _pathencode |
|
713 | encode = _pathencode | |
673 | else: |
|
714 | else: | |
674 | encode = _plainhybridencode |
|
715 | encode = _plainhybridencode | |
675 | self.encode = encode |
|
716 | self.encode = encode | |
676 | vfs = vfstype(path + b'/store') |
|
717 | vfs = vfstype(path + b'/store') | |
677 | self.path = vfs.base |
|
718 | self.path = vfs.base | |
678 | self.pathsep = self.path + b'/' |
|
719 | self.pathsep = self.path + b'/' | |
679 | self.createmode = _calcmode(vfs) |
|
720 | self.createmode = _calcmode(vfs) | |
680 | vfs.createmode = self.createmode |
|
721 | vfs.createmode = self.createmode | |
681 | self.rawvfs = vfs |
|
722 | self.rawvfs = vfs | |
682 | fnc = fncache(vfs) |
|
723 | fnc = fncache(vfs) | |
683 | self.fncache = fnc |
|
724 | self.fncache = fnc | |
684 | self.vfs = _fncachevfs(vfs, fnc, encode) |
|
725 | self.vfs = _fncachevfs(vfs, fnc, encode) | |
685 | self.opener = self.vfs |
|
726 | self.opener = self.vfs | |
686 |
|
727 | |||
687 | def join(self, f): |
|
728 | def join(self, f): | |
688 | return self.pathsep + self.encode(f) |
|
729 | return self.pathsep + self.encode(f) | |
689 |
|
730 | |||
690 | def getsize(self, path): |
|
731 | def getsize(self, path): | |
691 | return self.rawvfs.stat(path).st_size |
|
732 | return self.rawvfs.stat(path).st_size | |
692 |
|
733 | |||
693 | def datafiles(self, matcher=None): |
|
734 | def datafiles(self, matcher=None): | |
694 | for f in sorted(self.fncache): |
|
735 | for f in sorted(self.fncache): | |
695 | if not _matchtrackedpath(f, matcher): |
|
736 | if not _matchtrackedpath(f, matcher): | |
696 | continue |
|
737 | continue | |
697 | ef = self.encode(f) |
|
738 | ef = self.encode(f) | |
698 | try: |
|
739 | try: | |
699 | yield f, ef, self.getsize(ef) |
|
740 | t = revlog_type(f) | |
|
741 | t |= FILEFLAGS_FILELOG | |||
|
742 | yield t, f, ef, self.getsize(ef) | |||
700 | except OSError as err: |
|
743 | except OSError as err: | |
701 | if err.errno != errno.ENOENT: |
|
744 | if err.errno != errno.ENOENT: | |
702 | raise |
|
745 | raise | |
703 |
|
746 | |||
704 | def copylist(self): |
|
747 | def copylist(self): | |
705 | d = ( |
|
748 | d = ( | |
706 | b'bookmarks', |
|
749 | b'bookmarks', | |
707 | b'narrowspec', |
|
750 | b'narrowspec', | |
708 | b'data', |
|
751 | b'data', | |
709 | b'meta', |
|
752 | b'meta', | |
710 | b'dh', |
|
753 | b'dh', | |
711 | b'fncache', |
|
754 | b'fncache', | |
712 | b'phaseroots', |
|
755 | b'phaseroots', | |
713 | b'obsstore', |
|
756 | b'obsstore', | |
714 | b'00manifest.d', |
|
757 | b'00manifest.d', | |
715 | b'00manifest.i', |
|
758 | b'00manifest.i', | |
716 | b'00changelog.d', |
|
759 | b'00changelog.d', | |
717 | b'00changelog.i', |
|
760 | b'00changelog.i', | |
718 | b'requires', |
|
761 | b'requires', | |
719 | ) |
|
762 | ) | |
720 | return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d] |
|
763 | return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d] | |
721 |
|
764 | |||
722 | def write(self, tr): |
|
765 | def write(self, tr): | |
723 | self.fncache.write(tr) |
|
766 | self.fncache.write(tr) | |
724 |
|
767 | |||
725 | def invalidatecaches(self): |
|
768 | def invalidatecaches(self): | |
726 | self.fncache.entries = None |
|
769 | self.fncache.entries = None | |
727 | self.fncache.addls = set() |
|
770 | self.fncache.addls = set() | |
728 |
|
771 | |||
729 | def markremoved(self, fn): |
|
772 | def markremoved(self, fn): | |
730 | self.fncache.remove(fn) |
|
773 | self.fncache.remove(fn) | |
731 |
|
774 | |||
732 | def _exists(self, f): |
|
775 | def _exists(self, f): | |
733 | ef = self.encode(f) |
|
776 | ef = self.encode(f) | |
734 | try: |
|
777 | try: | |
735 | self.getsize(ef) |
|
778 | self.getsize(ef) | |
736 | return True |
|
779 | return True | |
737 | except OSError as err: |
|
780 | except OSError as err: | |
738 | if err.errno != errno.ENOENT: |
|
781 | if err.errno != errno.ENOENT: | |
739 | raise |
|
782 | raise | |
740 | # nonexistent entry |
|
783 | # nonexistent entry | |
741 | return False |
|
784 | return False | |
742 |
|
785 | |||
743 | def __contains__(self, path): |
|
786 | def __contains__(self, path): | |
744 | '''Checks if the store contains path''' |
|
787 | '''Checks if the store contains path''' | |
745 | path = b"/".join((b"data", path)) |
|
788 | path = b"/".join((b"data", path)) | |
746 | # check for files (exact match) |
|
789 | # check for files (exact match) | |
747 | e = path + b'.i' |
|
790 | e = path + b'.i' | |
748 | if e in self.fncache and self._exists(e): |
|
791 | if e in self.fncache and self._exists(e): | |
749 | return True |
|
792 | return True | |
750 | # now check for directories (prefix match) |
|
793 | # now check for directories (prefix match) | |
751 | if not path.endswith(b'/'): |
|
794 | if not path.endswith(b'/'): | |
752 | path += b'/' |
|
795 | path += b'/' | |
753 | for e in self.fncache: |
|
796 | for e in self.fncache: | |
754 | if e.startswith(path) and self._exists(e): |
|
797 | if e.startswith(path) and self._exists(e): | |
755 | return True |
|
798 | return True | |
756 | return False |
|
799 | return False |
@@ -1,735 +1,735 | |||||
1 | # streamclone.py - producing and consuming streaming repository data |
|
1 | # streamclone.py - producing and consuming streaming repository data | |
2 | # |
|
2 | # | |
3 | # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com> |
|
3 | # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import contextlib |
|
10 | import contextlib | |
11 | import os |
|
11 | import os | |
12 | import struct |
|
12 | import struct | |
13 |
|
13 | |||
14 | from .i18n import _ |
|
14 | from .i18n import _ | |
15 | from .pycompat import open |
|
15 | from .pycompat import open | |
16 | from .interfaces import repository |
|
16 | from .interfaces import repository | |
17 | from . import ( |
|
17 | from . import ( | |
18 | cacheutil, |
|
18 | cacheutil, | |
19 | error, |
|
19 | error, | |
20 | narrowspec, |
|
20 | narrowspec, | |
21 | phases, |
|
21 | phases, | |
22 | pycompat, |
|
22 | pycompat, | |
23 | requirements as requirementsmod, |
|
23 | requirements as requirementsmod, | |
24 | scmutil, |
|
24 | scmutil, | |
25 | store, |
|
25 | store, | |
26 | util, |
|
26 | util, | |
27 | ) |
|
27 | ) | |
28 |
|
28 | |||
29 |
|
29 | |||
30 | def canperformstreamclone(pullop, bundle2=False): |
|
30 | def canperformstreamclone(pullop, bundle2=False): | |
31 | """Whether it is possible to perform a streaming clone as part of pull. |
|
31 | """Whether it is possible to perform a streaming clone as part of pull. | |
32 |
|
32 | |||
33 | ``bundle2`` will cause the function to consider stream clone through |
|
33 | ``bundle2`` will cause the function to consider stream clone through | |
34 | bundle2 and only through bundle2. |
|
34 | bundle2 and only through bundle2. | |
35 |
|
35 | |||
36 | Returns a tuple of (supported, requirements). ``supported`` is True if |
|
36 | Returns a tuple of (supported, requirements). ``supported`` is True if | |
37 | streaming clone is supported and False otherwise. ``requirements`` is |
|
37 | streaming clone is supported and False otherwise. ``requirements`` is | |
38 | a set of repo requirements from the remote, or ``None`` if stream clone |
|
38 | a set of repo requirements from the remote, or ``None`` if stream clone | |
39 | isn't supported. |
|
39 | isn't supported. | |
40 | """ |
|
40 | """ | |
41 | repo = pullop.repo |
|
41 | repo = pullop.repo | |
42 | remote = pullop.remote |
|
42 | remote = pullop.remote | |
43 |
|
43 | |||
44 | bundle2supported = False |
|
44 | bundle2supported = False | |
45 | if pullop.canusebundle2: |
|
45 | if pullop.canusebundle2: | |
46 | if b'v2' in pullop.remotebundle2caps.get(b'stream', []): |
|
46 | if b'v2' in pullop.remotebundle2caps.get(b'stream', []): | |
47 | bundle2supported = True |
|
47 | bundle2supported = True | |
48 | # else |
|
48 | # else | |
49 | # Server doesn't support bundle2 stream clone or doesn't support |
|
49 | # Server doesn't support bundle2 stream clone or doesn't support | |
50 | # the versions we support. Fall back and possibly allow legacy. |
|
50 | # the versions we support. Fall back and possibly allow legacy. | |
51 |
|
51 | |||
52 | # Ensures legacy code path uses available bundle2. |
|
52 | # Ensures legacy code path uses available bundle2. | |
53 | if bundle2supported and not bundle2: |
|
53 | if bundle2supported and not bundle2: | |
54 | return False, None |
|
54 | return False, None | |
55 | # Ensures bundle2 doesn't try to do a stream clone if it isn't supported. |
|
55 | # Ensures bundle2 doesn't try to do a stream clone if it isn't supported. | |
56 | elif bundle2 and not bundle2supported: |
|
56 | elif bundle2 and not bundle2supported: | |
57 | return False, None |
|
57 | return False, None | |
58 |
|
58 | |||
59 | # Streaming clone only works on empty repositories. |
|
59 | # Streaming clone only works on empty repositories. | |
60 | if len(repo): |
|
60 | if len(repo): | |
61 | return False, None |
|
61 | return False, None | |
62 |
|
62 | |||
63 | # Streaming clone only works if all data is being requested. |
|
63 | # Streaming clone only works if all data is being requested. | |
64 | if pullop.heads: |
|
64 | if pullop.heads: | |
65 | return False, None |
|
65 | return False, None | |
66 |
|
66 | |||
67 | streamrequested = pullop.streamclonerequested |
|
67 | streamrequested = pullop.streamclonerequested | |
68 |
|
68 | |||
69 | # If we don't have a preference, let the server decide for us. This |
|
69 | # If we don't have a preference, let the server decide for us. This | |
70 | # likely only comes into play in LANs. |
|
70 | # likely only comes into play in LANs. | |
71 | if streamrequested is None: |
|
71 | if streamrequested is None: | |
72 | # The server can advertise whether to prefer streaming clone. |
|
72 | # The server can advertise whether to prefer streaming clone. | |
73 | streamrequested = remote.capable(b'stream-preferred') |
|
73 | streamrequested = remote.capable(b'stream-preferred') | |
74 |
|
74 | |||
75 | if not streamrequested: |
|
75 | if not streamrequested: | |
76 | return False, None |
|
76 | return False, None | |
77 |
|
77 | |||
78 | # In order for stream clone to work, the client has to support all the |
|
78 | # In order for stream clone to work, the client has to support all the | |
79 | # requirements advertised by the server. |
|
79 | # requirements advertised by the server. | |
80 | # |
|
80 | # | |
81 | # The server advertises its requirements via the "stream" and "streamreqs" |
|
81 | # The server advertises its requirements via the "stream" and "streamreqs" | |
82 | # capability. "stream" (a value-less capability) is advertised if and only |
|
82 | # capability. "stream" (a value-less capability) is advertised if and only | |
83 | # if the only requirement is "revlogv1." Else, the "streamreqs" capability |
|
83 | # if the only requirement is "revlogv1." Else, the "streamreqs" capability | |
84 | # is advertised and contains a comma-delimited list of requirements. |
|
84 | # is advertised and contains a comma-delimited list of requirements. | |
85 | requirements = set() |
|
85 | requirements = set() | |
86 | if remote.capable(b'stream'): |
|
86 | if remote.capable(b'stream'): | |
87 | requirements.add(requirementsmod.REVLOGV1_REQUIREMENT) |
|
87 | requirements.add(requirementsmod.REVLOGV1_REQUIREMENT) | |
88 | else: |
|
88 | else: | |
89 | streamreqs = remote.capable(b'streamreqs') |
|
89 | streamreqs = remote.capable(b'streamreqs') | |
90 | # This is weird and shouldn't happen with modern servers. |
|
90 | # This is weird and shouldn't happen with modern servers. | |
91 | if not streamreqs: |
|
91 | if not streamreqs: | |
92 | pullop.repo.ui.warn( |
|
92 | pullop.repo.ui.warn( | |
93 | _( |
|
93 | _( | |
94 | b'warning: stream clone requested but server has them ' |
|
94 | b'warning: stream clone requested but server has them ' | |
95 | b'disabled\n' |
|
95 | b'disabled\n' | |
96 | ) |
|
96 | ) | |
97 | ) |
|
97 | ) | |
98 | return False, None |
|
98 | return False, None | |
99 |
|
99 | |||
100 | streamreqs = set(streamreqs.split(b',')) |
|
100 | streamreqs = set(streamreqs.split(b',')) | |
101 | # Server requires something we don't support. Bail. |
|
101 | # Server requires something we don't support. Bail. | |
102 | missingreqs = streamreqs - repo.supportedformats |
|
102 | missingreqs = streamreqs - repo.supportedformats | |
103 | if missingreqs: |
|
103 | if missingreqs: | |
104 | pullop.repo.ui.warn( |
|
104 | pullop.repo.ui.warn( | |
105 | _( |
|
105 | _( | |
106 | b'warning: stream clone requested but client is missing ' |
|
106 | b'warning: stream clone requested but client is missing ' | |
107 | b'requirements: %s\n' |
|
107 | b'requirements: %s\n' | |
108 | ) |
|
108 | ) | |
109 | % b', '.join(sorted(missingreqs)) |
|
109 | % b', '.join(sorted(missingreqs)) | |
110 | ) |
|
110 | ) | |
111 | pullop.repo.ui.warn( |
|
111 | pullop.repo.ui.warn( | |
112 | _( |
|
112 | _( | |
113 | b'(see https://www.mercurial-scm.org/wiki/MissingRequirement ' |
|
113 | b'(see https://www.mercurial-scm.org/wiki/MissingRequirement ' | |
114 | b'for more information)\n' |
|
114 | b'for more information)\n' | |
115 | ) |
|
115 | ) | |
116 | ) |
|
116 | ) | |
117 | return False, None |
|
117 | return False, None | |
118 | requirements = streamreqs |
|
118 | requirements = streamreqs | |
119 |
|
119 | |||
120 | return True, requirements |
|
120 | return True, requirements | |
121 |
|
121 | |||
122 |
|
122 | |||
123 | def maybeperformlegacystreamclone(pullop): |
|
123 | def maybeperformlegacystreamclone(pullop): | |
124 | """Possibly perform a legacy stream clone operation. |
|
124 | """Possibly perform a legacy stream clone operation. | |
125 |
|
125 | |||
126 | Legacy stream clones are performed as part of pull but before all other |
|
126 | Legacy stream clones are performed as part of pull but before all other | |
127 | operations. |
|
127 | operations. | |
128 |
|
128 | |||
129 | A legacy stream clone will not be performed if a bundle2 stream clone is |
|
129 | A legacy stream clone will not be performed if a bundle2 stream clone is | |
130 | supported. |
|
130 | supported. | |
131 | """ |
|
131 | """ | |
132 | from . import localrepo |
|
132 | from . import localrepo | |
133 |
|
133 | |||
134 | supported, requirements = canperformstreamclone(pullop) |
|
134 | supported, requirements = canperformstreamclone(pullop) | |
135 |
|
135 | |||
136 | if not supported: |
|
136 | if not supported: | |
137 | return |
|
137 | return | |
138 |
|
138 | |||
139 | repo = pullop.repo |
|
139 | repo = pullop.repo | |
140 | remote = pullop.remote |
|
140 | remote = pullop.remote | |
141 |
|
141 | |||
142 | # Save remote branchmap. We will use it later to speed up branchcache |
|
142 | # Save remote branchmap. We will use it later to speed up branchcache | |
143 | # creation. |
|
143 | # creation. | |
144 | rbranchmap = None |
|
144 | rbranchmap = None | |
145 | if remote.capable(b'branchmap'): |
|
145 | if remote.capable(b'branchmap'): | |
146 | with remote.commandexecutor() as e: |
|
146 | with remote.commandexecutor() as e: | |
147 | rbranchmap = e.callcommand(b'branchmap', {}).result() |
|
147 | rbranchmap = e.callcommand(b'branchmap', {}).result() | |
148 |
|
148 | |||
149 | repo.ui.status(_(b'streaming all changes\n')) |
|
149 | repo.ui.status(_(b'streaming all changes\n')) | |
150 |
|
150 | |||
151 | with remote.commandexecutor() as e: |
|
151 | with remote.commandexecutor() as e: | |
152 | fp = e.callcommand(b'stream_out', {}).result() |
|
152 | fp = e.callcommand(b'stream_out', {}).result() | |
153 |
|
153 | |||
154 | # TODO strictly speaking, this code should all be inside the context |
|
154 | # TODO strictly speaking, this code should all be inside the context | |
155 | # manager because the context manager is supposed to ensure all wire state |
|
155 | # manager because the context manager is supposed to ensure all wire state | |
156 | # is flushed when exiting. But the legacy peers don't do this, so it |
|
156 | # is flushed when exiting. But the legacy peers don't do this, so it | |
157 | # doesn't matter. |
|
157 | # doesn't matter. | |
158 | l = fp.readline() |
|
158 | l = fp.readline() | |
159 | try: |
|
159 | try: | |
160 | resp = int(l) |
|
160 | resp = int(l) | |
161 | except ValueError: |
|
161 | except ValueError: | |
162 | raise error.ResponseError( |
|
162 | raise error.ResponseError( | |
163 | _(b'unexpected response from remote server:'), l |
|
163 | _(b'unexpected response from remote server:'), l | |
164 | ) |
|
164 | ) | |
165 | if resp == 1: |
|
165 | if resp == 1: | |
166 | raise error.Abort(_(b'operation forbidden by server')) |
|
166 | raise error.Abort(_(b'operation forbidden by server')) | |
167 | elif resp == 2: |
|
167 | elif resp == 2: | |
168 | raise error.Abort(_(b'locking the remote repository failed')) |
|
168 | raise error.Abort(_(b'locking the remote repository failed')) | |
169 | elif resp != 0: |
|
169 | elif resp != 0: | |
170 | raise error.Abort(_(b'the server sent an unknown error code')) |
|
170 | raise error.Abort(_(b'the server sent an unknown error code')) | |
171 |
|
171 | |||
172 | l = fp.readline() |
|
172 | l = fp.readline() | |
173 | try: |
|
173 | try: | |
174 | filecount, bytecount = map(int, l.split(b' ', 1)) |
|
174 | filecount, bytecount = map(int, l.split(b' ', 1)) | |
175 | except (ValueError, TypeError): |
|
175 | except (ValueError, TypeError): | |
176 | raise error.ResponseError( |
|
176 | raise error.ResponseError( | |
177 | _(b'unexpected response from remote server:'), l |
|
177 | _(b'unexpected response from remote server:'), l | |
178 | ) |
|
178 | ) | |
179 |
|
179 | |||
180 | with repo.lock(): |
|
180 | with repo.lock(): | |
181 | consumev1(repo, fp, filecount, bytecount) |
|
181 | consumev1(repo, fp, filecount, bytecount) | |
182 |
|
182 | |||
183 | # new requirements = old non-format requirements + |
|
183 | # new requirements = old non-format requirements + | |
184 | # new format-related remote requirements |
|
184 | # new format-related remote requirements | |
185 | # requirements from the streamed-in repository |
|
185 | # requirements from the streamed-in repository | |
186 | repo.requirements = requirements | ( |
|
186 | repo.requirements = requirements | ( | |
187 | repo.requirements - repo.supportedformats |
|
187 | repo.requirements - repo.supportedformats | |
188 | ) |
|
188 | ) | |
189 | repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
189 | repo.svfs.options = localrepo.resolvestorevfsoptions( | |
190 | repo.ui, repo.requirements, repo.features |
|
190 | repo.ui, repo.requirements, repo.features | |
191 | ) |
|
191 | ) | |
192 | scmutil.writereporequirements(repo) |
|
192 | scmutil.writereporequirements(repo) | |
193 |
|
193 | |||
194 | if rbranchmap: |
|
194 | if rbranchmap: | |
195 | repo._branchcaches.replace(repo, rbranchmap) |
|
195 | repo._branchcaches.replace(repo, rbranchmap) | |
196 |
|
196 | |||
197 | repo.invalidate() |
|
197 | repo.invalidate() | |
198 |
|
198 | |||
199 |
|
199 | |||
200 | def allowservergeneration(repo): |
|
200 | def allowservergeneration(repo): | |
201 | """Whether streaming clones are allowed from the server.""" |
|
201 | """Whether streaming clones are allowed from the server.""" | |
202 | if repository.REPO_FEATURE_STREAM_CLONE not in repo.features: |
|
202 | if repository.REPO_FEATURE_STREAM_CLONE not in repo.features: | |
203 | return False |
|
203 | return False | |
204 |
|
204 | |||
205 | if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True): |
|
205 | if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True): | |
206 | return False |
|
206 | return False | |
207 |
|
207 | |||
208 | # The way stream clone works makes it impossible to hide secret changesets. |
|
208 | # The way stream clone works makes it impossible to hide secret changesets. | |
209 | # So don't allow this by default. |
|
209 | # So don't allow this by default. | |
210 | secret = phases.hassecret(repo) |
|
210 | secret = phases.hassecret(repo) | |
211 | if secret: |
|
211 | if secret: | |
212 | return repo.ui.configbool(b'server', b'uncompressedallowsecret') |
|
212 | return repo.ui.configbool(b'server', b'uncompressedallowsecret') | |
213 |
|
213 | |||
214 | return True |
|
214 | return True | |
215 |
|
215 | |||
216 |
|
216 | |||
217 | # This is its own function so extensions can override it. |
|
217 | # This is its own function so extensions can override it. | |
218 | def _walkstreamfiles(repo, matcher=None): |
|
218 | def _walkstreamfiles(repo, matcher=None): | |
219 | return repo.store.walk(matcher) |
|
219 | return repo.store.walk(matcher) | |
220 |
|
220 | |||
221 |
|
221 | |||
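The comment above notes that _walkstreamfiles is kept as its own function so extensions can wrap it. Since the store walk now yields 4-tuples (file type flags, name, encoded name, size) instead of the old 3-tuples, any wrapper has to unpack the extra field. A minimal sketch of such a wrapper, assuming the standard extensions.wrapfunction hook; the is_wanted predicate and the filtered path are purely illustrative:

# hypothetical extension sketch: drop some files from a served stream clone
from mercurial import extensions, streamclone

def is_wanted(name):
    # placeholder predicate, not part of Mercurial
    return not name.startswith(b'data/ignored/')

def filteredwalk(orig, repo, matcher=None):
    # the walk now yields (file_type, name, encoded_name, size) 4-tuples
    for file_type, name, ename, size in orig(repo, matcher):
        if is_wanted(name):
            yield file_type, name, ename, size

def extsetup(ui):
    extensions.wrapfunction(streamclone, '_walkstreamfiles', filteredwalk)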
222 | def generatev1(repo): |
|
222 | def generatev1(repo): | |
223 | """Emit content for version 1 of a streaming clone. |
|
223 | """Emit content for version 1 of a streaming clone. | |
224 |
|
224 | |||
225 | This returns a 3-tuple of (file count, byte size, data iterator). |
|
225 | This returns a 3-tuple of (file count, byte size, data iterator). | |
226 |
|
226 | |||
227 | The data iterator consists of N entries for each file being transferred. |
|
227 | The data iterator consists of N entries for each file being transferred. | |
228 | Each file entry starts as a line with the file name and integer size |
|
228 | Each file entry starts as a line with the file name and integer size | |
229 | delimited by a null byte. |
|
229 | delimited by a null byte. | |
230 |
|
230 | |||
231 | The raw file data follows. Following the raw file data is the next file |
|
231 | The raw file data follows. Following the raw file data is the next file | |
232 | entry, or EOF. |
|
232 | entry, or EOF. | |
233 |
|
233 | |||
234 | When used on the wire protocol, an additional line indicating protocol |
|
234 | When used on the wire protocol, an additional line indicating protocol | |
235 | success will be prepended to the stream. This function is not responsible |
|
235 | success will be prepended to the stream. This function is not responsible | |
236 | for adding it. |
|
236 | for adding it. | |
237 |
|
237 | |||
238 | This function will obtain a repository lock to ensure a consistent view of |
|
238 | This function will obtain a repository lock to ensure a consistent view of | |
239 | the store is captured. It therefore may raise LockError. |
|
239 | the store is captured. It therefore may raise LockError. | |
240 | """ |
|
240 | """ | |
241 | entries = [] |
|
241 | entries = [] | |
242 | total_bytes = 0 |
|
242 | total_bytes = 0 | |
243 | # Get consistent snapshot of repo, lock during scan. |
|
243 | # Get consistent snapshot of repo, lock during scan. | |
244 | with repo.lock(): |
|
244 | with repo.lock(): | |
245 | repo.ui.debug(b'scanning\n') |
|
245 | repo.ui.debug(b'scanning\n') | |
246 | for name, ename, size in _walkstreamfiles(repo): |
|
246 | for file_type, name, ename, size in _walkstreamfiles(repo): | |
247 | if size: |
|
247 | if size: | |
248 | entries.append((name, size)) |
|
248 | entries.append((name, size)) | |
249 | total_bytes += size |
|
249 | total_bytes += size | |
250 |
|
250 | |||
251 | repo.ui.debug( |
|
251 | repo.ui.debug( | |
252 | b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes) |
|
252 | b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes) | |
253 | ) |
|
253 | ) | |
254 |
|
254 | |||
255 | svfs = repo.svfs |
|
255 | svfs = repo.svfs | |
256 | debugflag = repo.ui.debugflag |
|
256 | debugflag = repo.ui.debugflag | |
257 |
|
257 | |||
258 | def emitrevlogdata(): |
|
258 | def emitrevlogdata(): | |
259 | for name, size in entries: |
|
259 | for name, size in entries: | |
260 | if debugflag: |
|
260 | if debugflag: | |
261 | repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size)) |
|
261 | repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size)) | |
262 | # partially encode name over the wire for backwards compat |
|
262 | # partially encode name over the wire for backwards compat | |
263 | yield b'%s\0%d\n' % (store.encodedir(name), size) |
|
263 | yield b'%s\0%d\n' % (store.encodedir(name), size) | |
264 | # auditing at this stage is both pointless (paths are already |
|
264 | # auditing at this stage is both pointless (paths are already | |
265 | # trusted by the local repo) and expensive |
|
265 | # trusted by the local repo) and expensive | |
266 | with svfs(name, b'rb', auditpath=False) as fp: |
|
266 | with svfs(name, b'rb', auditpath=False) as fp: | |
267 | if size <= 65536: |
|
267 | if size <= 65536: | |
268 | yield fp.read(size) |
|
268 | yield fp.read(size) | |
269 | else: |
|
269 | else: | |
270 | for chunk in util.filechunkiter(fp, limit=size): |
|
270 | for chunk in util.filechunkiter(fp, limit=size): | |
271 | yield chunk |
|
271 | yield chunk | |
272 |
|
272 | |||
273 | return len(entries), total_bytes, emitrevlogdata() |
|
273 | return len(entries), total_bytes, emitrevlogdata() | |
274 |
|
274 | |||
275 |
|
275 | |||
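The framing produced by emitrevlogdata() is simple enough to decode without Mercurial's helpers: each entry is a header line of the form <encoded name>\0<size>\n followed by exactly <size> bytes of raw revlog data, and the name is still the partially encoded form produced by store.encodedir. A stand-alone reader sketch (the function name and error handling are illustrative only):

def read_v1_entries(fp, filecount):
    """Yield (encoded name, data) pairs from a version 1 stream body.

    fp must be positioned just after the '<filecount> <bytecount>' line.
    """
    for _ in range(filecount):
        header = fp.readline()                 # b'<name>\0<size>\n'
        name, size = header.rstrip(b'\n').split(b'\0', 1)
        size = int(size)
        data = fp.read(size)                   # raw file content follows
        if len(data) != size:
            raise ValueError('truncated stream entry for %r' % name)
        yield name, data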
276 | def generatev1wireproto(repo): |
|
276 | def generatev1wireproto(repo): | |
277 | """Emit content for version 1 of streaming clone suitable for the wire. |
|
277 | """Emit content for version 1 of streaming clone suitable for the wire. | |
278 |
|
278 | |||
279 | This is the data output from ``generatev1()`` with 2 header lines. The |
|
279 | This is the data output from ``generatev1()`` with 2 header lines. The | |
280 | first line indicates overall success. The 2nd contains the file count and |
|
280 | first line indicates overall success. The 2nd contains the file count and | |
281 | byte size of payload. |
|
281 | byte size of payload. | |
282 |
|
282 | |||
283 | The success line contains "0" for success, "1" for stream generation not |
|
283 | The success line contains "0" for success, "1" for stream generation not | |
284 | allowed, and "2" for error locking the repository (possibly indicating |
|
284 | allowed, and "2" for error locking the repository (possibly indicating | |
285 | a permissions error for the server process). |
|
285 | a permissions error for the server process). | |
286 | """ |
|
286 | """ | |
287 | if not allowservergeneration(repo): |
|
287 | if not allowservergeneration(repo): | |
288 | yield b'1\n' |
|
288 | yield b'1\n' | |
289 | return |
|
289 | return | |
290 |
|
290 | |||
291 | try: |
|
291 | try: | |
292 | filecount, bytecount, it = generatev1(repo) |
|
292 | filecount, bytecount, it = generatev1(repo) | |
293 | except error.LockError: |
|
293 | except error.LockError: | |
294 | yield b'2\n' |
|
294 | yield b'2\n' | |
295 | return |
|
295 | return | |
296 |
|
296 | |||
297 | # Indicates successful response. |
|
297 | # Indicates successful response. | |
298 | yield b'0\n' |
|
298 | yield b'0\n' | |
299 | yield b'%d %d\n' % (filecount, bytecount) |
|
299 | yield b'%d %d\n' % (filecount, bytecount) | |
300 | for chunk in it: |
|
300 | for chunk in it: | |
301 | yield chunk |
|
301 | yield chunk | |
302 |
|
302 | |||
303 |
|
303 | |||
304 | def generatebundlev1(repo, compression=b'UN'): |
|
304 | def generatebundlev1(repo, compression=b'UN'): | |
305 | """Emit content for version 1 of a stream clone bundle. |
|
305 | """Emit content for version 1 of a stream clone bundle. | |
306 |
|
306 | |||
307 | The first 4 bytes of the output ("HGS1") denote this as stream clone |
|
307 | The first 4 bytes of the output ("HGS1") denote this as stream clone | |
308 | bundle version 1. |
|
308 | bundle version 1. | |
309 |
|
309 | |||
310 | The next 2 bytes indicate the compression type. Only "UN" is currently |
|
310 | The next 2 bytes indicate the compression type. Only "UN" is currently | |
311 | supported. |
|
311 | supported. | |
312 |
|
312 | |||
313 | The next 16 bytes are two 64-bit big endian unsigned integers indicating |
|
313 | The next 16 bytes are two 64-bit big endian unsigned integers indicating | |
314 | file count and byte count, respectively. |
|
314 | file count and byte count, respectively. | |
315 |
|
315 | |||
316 | The next 2 bytes are a 16-bit big endian unsigned short declaring the length |
|
316 | The next 2 bytes are a 16-bit big endian unsigned short declaring the length | |
317 | of the requirements string, including a trailing \0. The following N bytes |
|
317 | of the requirements string, including a trailing \0. The following N bytes | |
318 | are the requirements string, which is ASCII containing a comma-delimited |
|
318 | are the requirements string, which is ASCII containing a comma-delimited | |
319 | list of repo requirements that are needed to support the data. |
|
319 | list of repo requirements that are needed to support the data. | |
320 |
|
320 | |||
321 | The remaining content is the output of ``generatev1()`` (which may be |
|
321 | The remaining content is the output of ``generatev1()`` (which may be | |
322 | compressed in the future). |
|
322 | compressed in the future). | |
323 |
|
323 | |||
324 | Returns a tuple of (requirements, data generator). |
|
324 | Returns a tuple of (requirements, data generator). | |
325 | """ |
|
325 | """ | |
326 | if compression != b'UN': |
|
326 | if compression != b'UN': | |
327 | raise ValueError(b'we do not support the compression argument yet') |
|
327 | raise ValueError(b'we do not support the compression argument yet') | |
328 |
|
328 | |||
329 | requirements = repo.requirements & repo.supportedformats |
|
329 | requirements = repo.requirements & repo.supportedformats | |
330 | requires = b','.join(sorted(requirements)) |
|
330 | requires = b','.join(sorted(requirements)) | |
331 |
|
331 | |||
332 | def gen(): |
|
332 | def gen(): | |
333 | yield b'HGS1' |
|
333 | yield b'HGS1' | |
334 | yield compression |
|
334 | yield compression | |
335 |
|
335 | |||
336 | filecount, bytecount, it = generatev1(repo) |
|
336 | filecount, bytecount, it = generatev1(repo) | |
337 | repo.ui.status( |
|
337 | repo.ui.status( | |
338 | _(b'writing %d bytes for %d files\n') % (bytecount, filecount) |
|
338 | _(b'writing %d bytes for %d files\n') % (bytecount, filecount) | |
339 | ) |
|
339 | ) | |
340 |
|
340 | |||
341 | yield struct.pack(b'>QQ', filecount, bytecount) |
|
341 | yield struct.pack(b'>QQ', filecount, bytecount) | |
342 | yield struct.pack(b'>H', len(requires) + 1) |
|
342 | yield struct.pack(b'>H', len(requires) + 1) | |
343 | yield requires + b'\0' |
|
343 | yield requires + b'\0' | |
344 |
|
344 | |||
345 | # This is where we'll add compression in the future. |
|
345 | # This is where we'll add compression in the future. | |
346 | assert compression == b'UN' |
|
346 | assert compression == b'UN' | |
347 |
|
347 | |||
348 | progress = repo.ui.makeprogress( |
|
348 | progress = repo.ui.makeprogress( | |
349 | _(b'bundle'), total=bytecount, unit=_(b'bytes') |
|
349 | _(b'bundle'), total=bytecount, unit=_(b'bytes') | |
350 | ) |
|
350 | ) | |
351 | progress.update(0) |
|
351 | progress.update(0) | |
352 |
|
352 | |||
353 | for chunk in it: |
|
353 | for chunk in it: | |
354 | progress.increment(step=len(chunk)) |
|
354 | progress.increment(step=len(chunk)) | |
355 | yield chunk |
|
355 | yield chunk | |
356 |
|
356 | |||
357 | progress.complete() |
|
357 | progress.complete() | |
358 |
|
358 | |||
359 | return requirements, gen() |
|
359 | return requirements, gen() | |
360 |
|
360 | |||
361 |
|
361 | |||
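As a usage sketch, the generator returned by generatebundlev1() can simply be drained into a file to produce a stand-alone stream clone bundle; the header (magic, compression, counts, requirements) is already part of the generated chunks. The function name and path handling below are illustrative, not part of this module:

def writestreambundle(repo, path):
    requirements, gen = generatebundlev1(repo)
    with open(path, 'wb') as fh:
        for chunk in gen:
            fh.write(chunk)
    return requirements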
362 | def consumev1(repo, fp, filecount, bytecount): |
|
362 | def consumev1(repo, fp, filecount, bytecount): | |
363 | """Apply the contents from version 1 of a streaming clone file handle. |
|
363 | """Apply the contents from version 1 of a streaming clone file handle. | |
364 |
|
364 | |||
365 | This takes the output from "stream_out" and applies it to the specified |
|
365 | This takes the output from "stream_out" and applies it to the specified | |
366 | repository. |
|
366 | repository. | |
367 |
|
367 | |||
368 | Like "stream_out," the status line added by the wire protocol is not |
|
368 | Like "stream_out," the status line added by the wire protocol is not | |
369 | handled by this function. |
|
369 | handled by this function. | |
370 | """ |
|
370 | """ | |
371 | with repo.lock(): |
|
371 | with repo.lock(): | |
372 | repo.ui.status( |
|
372 | repo.ui.status( | |
373 | _(b'%d files to transfer, %s of data\n') |
|
373 | _(b'%d files to transfer, %s of data\n') | |
374 | % (filecount, util.bytecount(bytecount)) |
|
374 | % (filecount, util.bytecount(bytecount)) | |
375 | ) |
|
375 | ) | |
376 | progress = repo.ui.makeprogress( |
|
376 | progress = repo.ui.makeprogress( | |
377 | _(b'clone'), total=bytecount, unit=_(b'bytes') |
|
377 | _(b'clone'), total=bytecount, unit=_(b'bytes') | |
378 | ) |
|
378 | ) | |
379 | progress.update(0) |
|
379 | progress.update(0) | |
380 | start = util.timer() |
|
380 | start = util.timer() | |
381 |
|
381 | |||
382 | # TODO: get rid of (potential) inconsistency |
|
382 | # TODO: get rid of (potential) inconsistency | |
383 | # |
|
383 | # | |
384 | # If transaction is started and any @filecache property is |
|
384 | # If transaction is started and any @filecache property is | |
385 | # changed at this point, it causes inconsistency between |
|
385 | # changed at this point, it causes inconsistency between | |
386 | # in-memory cached property and streamclone-ed file on the |
|
386 | # in-memory cached property and streamclone-ed file on the | |
387 | # disk. Nested transaction prevents transaction scope "clone" |
|
387 | # disk. Nested transaction prevents transaction scope "clone" | |
388 | # below from writing in-memory changes out at the end of it, |
|
388 | # below from writing in-memory changes out at the end of it, | |
389 | # even though in-memory changes are discarded at the end of it |
|
389 | # even though in-memory changes are discarded at the end of it | |
390 | # regardless of transaction nesting. |
|
390 | # regardless of transaction nesting. | |
391 | # |
|
391 | # | |
392 | # But transaction nesting can't be simply prohibited, because |
|
392 | # But transaction nesting can't be simply prohibited, because | |
393 | # nesting also occurs in ordinary cases (e.g. enabling |
|
393 | # nesting also occurs in ordinary cases (e.g. enabling | |
394 | # clonebundles). |
|
394 | # clonebundles). | |
395 |
|
395 | |||
396 | with repo.transaction(b'clone'): |
|
396 | with repo.transaction(b'clone'): | |
397 | with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount): |
|
397 | with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount): | |
398 | for i in pycompat.xrange(filecount): |
|
398 | for i in pycompat.xrange(filecount): | |
399 | # XXX doesn't support '\n' or '\r' in filenames |
|
399 | # XXX doesn't support '\n' or '\r' in filenames | |
400 | l = fp.readline() |
|
400 | l = fp.readline() | |
401 | try: |
|
401 | try: | |
402 | name, size = l.split(b'\0', 1) |
|
402 | name, size = l.split(b'\0', 1) | |
403 | size = int(size) |
|
403 | size = int(size) | |
404 | except (ValueError, TypeError): |
|
404 | except (ValueError, TypeError): | |
405 | raise error.ResponseError( |
|
405 | raise error.ResponseError( | |
406 | _(b'unexpected response from remote server:'), l |
|
406 | _(b'unexpected response from remote server:'), l | |
407 | ) |
|
407 | ) | |
408 | if repo.ui.debugflag: |
|
408 | if repo.ui.debugflag: | |
409 | repo.ui.debug( |
|
409 | repo.ui.debug( | |
410 | b'adding %s (%s)\n' % (name, util.bytecount(size)) |
|
410 | b'adding %s (%s)\n' % (name, util.bytecount(size)) | |
411 | ) |
|
411 | ) | |
412 | # for backwards compat, name was partially encoded |
|
412 | # for backwards compat, name was partially encoded | |
413 | path = store.decodedir(name) |
|
413 | path = store.decodedir(name) | |
414 | with repo.svfs(path, b'w', backgroundclose=True) as ofp: |
|
414 | with repo.svfs(path, b'w', backgroundclose=True) as ofp: | |
415 | for chunk in util.filechunkiter(fp, limit=size): |
|
415 | for chunk in util.filechunkiter(fp, limit=size): | |
416 | progress.increment(step=len(chunk)) |
|
416 | progress.increment(step=len(chunk)) | |
417 | ofp.write(chunk) |
|
417 | ofp.write(chunk) | |
418 |
|
418 | |||
419 | # force @filecache properties to be reloaded from |
|
419 | # force @filecache properties to be reloaded from | |
420 | # streamclone-ed file at next access |
|
420 | # streamclone-ed file at next access | |
421 | repo.invalidate(clearfilecache=True) |
|
421 | repo.invalidate(clearfilecache=True) | |
422 |
|
422 | |||
423 | elapsed = util.timer() - start |
|
423 | elapsed = util.timer() - start | |
424 | if elapsed <= 0: |
|
424 | if elapsed <= 0: | |
425 | elapsed = 0.001 |
|
425 | elapsed = 0.001 | |
426 | progress.complete() |
|
426 | progress.complete() | |
427 | repo.ui.status( |
|
427 | repo.ui.status( | |
428 | _(b'transferred %s in %.1f seconds (%s/sec)\n') |
|
428 | _(b'transferred %s in %.1f seconds (%s/sec)\n') | |
429 | % ( |
|
429 | % ( | |
430 | util.bytecount(bytecount), |
|
430 | util.bytecount(bytecount), | |
431 | elapsed, |
|
431 | elapsed, | |
432 | util.bytecount(bytecount / elapsed), |
|
432 | util.bytecount(bytecount / elapsed), | |
433 | ) |
|
433 | ) | |
434 | ) |
|
434 | ) | |
435 |
|
435 | |||
436 |
|
436 | |||
437 | def readbundle1header(fp): |
|
437 | def readbundle1header(fp): | |
438 | compression = fp.read(2) |
|
438 | compression = fp.read(2) | |
439 | if compression != b'UN': |
|
439 | if compression != b'UN': | |
440 | raise error.Abort( |
|
440 | raise error.Abort( | |
441 | _( |
|
441 | _( | |
442 | b'only uncompressed stream clone bundles are ' |
|
442 | b'only uncompressed stream clone bundles are ' | |
443 | b'supported; got %s' |
|
443 | b'supported; got %s' | |
444 | ) |
|
444 | ) | |
445 | % compression |
|
445 | % compression | |
446 | ) |
|
446 | ) | |
447 |
|
447 | |||
448 | filecount, bytecount = struct.unpack(b'>QQ', fp.read(16)) |
|
448 | filecount, bytecount = struct.unpack(b'>QQ', fp.read(16)) | |
449 | requireslen = struct.unpack(b'>H', fp.read(2))[0] |
|
449 | requireslen = struct.unpack(b'>H', fp.read(2))[0] | |
450 | requires = fp.read(requireslen) |
|
450 | requires = fp.read(requireslen) | |
451 |
|
451 | |||
452 | if not requires.endswith(b'\0'): |
|
452 | if not requires.endswith(b'\0'): | |
453 | raise error.Abort( |
|
453 | raise error.Abort( | |
454 | _( |
|
454 | _( | |
455 | b'malformed stream clone bundle: ' |
|
455 | b'malformed stream clone bundle: ' | |
456 | b'requirements not properly encoded' |
|
456 | b'requirements not properly encoded' | |
457 | ) |
|
457 | ) | |
458 | ) |
|
458 | ) | |
459 |
|
459 | |||
460 | requirements = set(requires.rstrip(b'\0').split(b',')) |
|
460 | requirements = set(requires.rstrip(b'\0').split(b',')) | |
461 |
|
461 | |||
462 | return filecount, bytecount, requirements |
|
462 | return filecount, bytecount, requirements | |
463 |
|
463 | |||
464 |
|
464 | |||
465 | def applybundlev1(repo, fp): |
|
465 | def applybundlev1(repo, fp): | |
466 | """Apply the content from a stream clone bundle version 1. |
|
466 | """Apply the content from a stream clone bundle version 1. | |
467 |
|
467 | |||
468 | We assume the 4 byte header has been read and validated and the file handle |
|
468 | We assume the 4 byte header has been read and validated and the file handle | |
469 | is at the 2 byte compression identifier. |
|
469 | is at the 2 byte compression identifier. | |
470 | """ |
|
470 | """ | |
471 | if len(repo): |
|
471 | if len(repo): | |
472 | raise error.Abort( |
|
472 | raise error.Abort( | |
473 | _(b'cannot apply stream clone bundle on non-empty repo') |
|
473 | _(b'cannot apply stream clone bundle on non-empty repo') | |
474 | ) |
|
474 | ) | |
475 |
|
475 | |||
476 | filecount, bytecount, requirements = readbundle1header(fp) |
|
476 | filecount, bytecount, requirements = readbundle1header(fp) | |
477 | missingreqs = requirements - repo.supportedformats |
|
477 | missingreqs = requirements - repo.supportedformats | |
478 | if missingreqs: |
|
478 | if missingreqs: | |
479 | raise error.Abort( |
|
479 | raise error.Abort( | |
480 | _(b'unable to apply stream clone: unsupported format: %s') |
|
480 | _(b'unable to apply stream clone: unsupported format: %s') | |
481 | % b', '.join(sorted(missingreqs)) |
|
481 | % b', '.join(sorted(missingreqs)) | |
482 | ) |
|
482 | ) | |
483 |
|
483 | |||
484 | consumev1(repo, fp, filecount, bytecount) |
|
484 | consumev1(repo, fp, filecount, bytecount) | |
485 |
|
485 | |||
486 |
|
486 | |||
487 | class streamcloneapplier(object): |
|
487 | class streamcloneapplier(object): | |
488 | """Class to manage applying streaming clone bundles. |
|
488 | """Class to manage applying streaming clone bundles. | |
489 |
|
489 | |||
490 | We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle |
|
490 | We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle | |
491 | readers to perform bundle type-specific functionality. |
|
491 | readers to perform bundle type-specific functionality. | |
492 | """ |
|
492 | """ | |
493 |
|
493 | |||
494 | def __init__(self, fh): |
|
494 | def __init__(self, fh): | |
495 | self._fh = fh |
|
495 | self._fh = fh | |
496 |
|
496 | |||
497 | def apply(self, repo): |
|
497 | def apply(self, repo): | |
498 | return applybundlev1(repo, self._fh) |
|
498 | return applybundlev1(repo, self._fh) | |
499 |
|
499 | |||
500 |
|
500 | |||
501 | # type of file to stream |
|
501 | # type of file to stream | |
502 | _fileappend = 0 # append only file |
|
502 | _fileappend = 0 # append only file | |
503 | _filefull = 1 # full snapshot file |
|
503 | _filefull = 1 # full snapshot file | |
504 |
|
504 | |||
505 | # Source of the file |
|
505 | # Source of the file | |
506 | _srcstore = b's' # store (svfs) |
|
506 | _srcstore = b's' # store (svfs) | |
507 | _srccache = b'c' # cache (cache) |
|
507 | _srccache = b'c' # cache (cache) | |
508 |
|
508 | |||
509 | # This is its own function so extensions can override it. |
|
509 | # This is its own function so extensions can override it. | |
510 | def _walkstreamfullstorefiles(repo): |
|
510 | def _walkstreamfullstorefiles(repo): | |
511 | """list snapshot file from the store""" |
|
511 | """list snapshot file from the store""" | |
512 | fnames = [] |
|
512 | fnames = [] | |
513 | if not repo.publishing(): |
|
513 | if not repo.publishing(): | |
514 | fnames.append(b'phaseroots') |
|
514 | fnames.append(b'phaseroots') | |
515 | return fnames |
|
515 | return fnames | |
516 |
|
516 | |||
517 |
|
517 | |||
518 | def _filterfull(entry, copy, vfsmap): |
|
518 | def _filterfull(entry, copy, vfsmap): | |
519 | """actually copy the snapshot files""" |
|
519 | """actually copy the snapshot files""" | |
520 | src, name, ftype, data = entry |
|
520 | src, name, ftype, data = entry | |
521 | if ftype != _filefull: |
|
521 | if ftype != _filefull: | |
522 | return entry |
|
522 | return entry | |
523 | return (src, name, ftype, copy(vfsmap[src].join(name))) |
|
523 | return (src, name, ftype, copy(vfsmap[src].join(name))) | |
524 |
|
524 | |||
525 |
|
525 | |||
526 | @contextlib.contextmanager |
|
526 | @contextlib.contextmanager | |
527 | def maketempcopies(): |
|
527 | def maketempcopies(): | |
528 | """return a function to temporary copy file""" |
|
528 | """return a function to temporary copy file""" | |
529 | files = [] |
|
529 | files = [] | |
530 | try: |
|
530 | try: | |
531 |
|
531 | |||
532 | def copy(src): |
|
532 | def copy(src): | |
533 | fd, dst = pycompat.mkstemp() |
|
533 | fd, dst = pycompat.mkstemp() | |
534 | os.close(fd) |
|
534 | os.close(fd) | |
535 | files.append(dst) |
|
535 | files.append(dst) | |
536 | util.copyfiles(src, dst, hardlink=True) |
|
536 | util.copyfiles(src, dst, hardlink=True) | |
537 | return dst |
|
537 | return dst | |
538 |
|
538 | |||
539 | yield copy |
|
539 | yield copy | |
540 | finally: |
|
540 | finally: | |
541 | for tmp in files: |
|
541 | for tmp in files: | |
542 | util.tryunlink(tmp) |
|
542 | util.tryunlink(tmp) | |
543 |
|
543 | |||
544 |
|
544 | |||
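For reference, the helper above is meant to be used as a context manager: the copy function it yields hardlinks (or copies) a file to a temporary location, and every temporary copy is unlinked when the block exits. A short usage sketch; the file name is only an example of a full-snapshot file:

with maketempcopies() as copy:
    frozen = copy(repo.svfs.join(b'phaseroots'))
    with open(frozen, 'rb') as fp:
        data = fp.read()   # read a stable snapshot while the repo stays live
# the temporary copy has been removed at this point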
545 | def _makemap(repo): |
|
545 | def _makemap(repo): | |
546 | """make a (src -> vfs) map for the repo""" |
|
546 | """make a (src -> vfs) map for the repo""" | |
547 | vfsmap = { |
|
547 | vfsmap = { | |
548 | _srcstore: repo.svfs, |
|
548 | _srcstore: repo.svfs, | |
549 | _srccache: repo.cachevfs, |
|
549 | _srccache: repo.cachevfs, | |
550 | } |
|
550 | } | |
551 | # we keep repo.vfs out of the map on purpose, there are too many dangers there |
|
551 | # we keep repo.vfs out of the map on purpose, there are too many dangers there | |
552 | # (eg: .hg/hgrc) |
|
552 | # (eg: .hg/hgrc) | |
553 | assert repo.vfs not in vfsmap.values() |
|
553 | assert repo.vfs not in vfsmap.values() | |
554 |
|
554 | |||
555 | return vfsmap |
|
555 | return vfsmap | |
556 |
|
556 | |||
557 |
|
557 | |||
558 | def _emit2(repo, entries, totalfilesize): |
|
558 | def _emit2(repo, entries, totalfilesize): | |
559 | """actually emit the stream bundle""" |
|
559 | """actually emit the stream bundle""" | |
560 | vfsmap = _makemap(repo) |
|
560 | vfsmap = _makemap(repo) | |
561 | progress = repo.ui.makeprogress( |
|
561 | progress = repo.ui.makeprogress( | |
562 | _(b'bundle'), total=totalfilesize, unit=_(b'bytes') |
|
562 | _(b'bundle'), total=totalfilesize, unit=_(b'bytes') | |
563 | ) |
|
563 | ) | |
564 | progress.update(0) |
|
564 | progress.update(0) | |
565 | with maketempcopies() as copy, progress: |
|
565 | with maketempcopies() as copy, progress: | |
566 | # copy is delayed until we are in the try |
|
566 | # copy is delayed until we are in the try | |
567 | entries = [_filterfull(e, copy, vfsmap) for e in entries] |
|
567 | entries = [_filterfull(e, copy, vfsmap) for e in entries] | |
568 | yield None # this releases the lock on the repository |
|
568 | yield None # this releases the lock on the repository | |
569 | seen = 0 |
|
569 | seen = 0 | |
570 |
|
570 | |||
571 | for src, name, ftype, data in entries: |
|
571 | for src, name, ftype, data in entries: | |
572 | vfs = vfsmap[src] |
|
572 | vfs = vfsmap[src] | |
573 | yield src |
|
573 | yield src | |
574 | yield util.uvarintencode(len(name)) |
|
574 | yield util.uvarintencode(len(name)) | |
575 | if ftype == _fileappend: |
|
575 | if ftype == _fileappend: | |
576 | fp = vfs(name) |
|
576 | fp = vfs(name) | |
577 | size = data |
|
577 | size = data | |
578 | elif ftype == _filefull: |
|
578 | elif ftype == _filefull: | |
579 | fp = open(data, b'rb') |
|
579 | fp = open(data, b'rb') | |
580 | size = util.fstat(fp).st_size |
|
580 | size = util.fstat(fp).st_size | |
581 | try: |
|
581 | try: | |
582 | yield util.uvarintencode(size) |
|
582 | yield util.uvarintencode(size) | |
583 | yield name |
|
583 | yield name | |
584 | if size <= 65536: |
|
584 | if size <= 65536: | |
585 | chunks = (fp.read(size),) |
|
585 | chunks = (fp.read(size),) | |
586 | else: |
|
586 | else: | |
587 | chunks = util.filechunkiter(fp, limit=size) |
|
587 | chunks = util.filechunkiter(fp, limit=size) | |
588 | for chunk in chunks: |
|
588 | for chunk in chunks: | |
589 | seen += len(chunk) |
|
589 | seen += len(chunk) | |
590 | progress.update(seen) |
|
590 | progress.update(seen) | |
591 | yield chunk |
|
591 | yield chunk | |
592 | finally: |
|
592 | finally: | |
593 | fp.close() |
|
593 | fp.close() | |
594 |
|
594 | |||
595 |
|
595 | |||
596 | def generatev2(repo, includes, excludes, includeobsmarkers): |
|
596 | def generatev2(repo, includes, excludes, includeobsmarkers): | |
597 | """Emit content for version 2 of a streaming clone. |
|
597 | """Emit content for version 2 of a streaming clone. | |
598 |
|
598 | |||
599 | the data stream consists of the following entries: |
|
599 | the data stream consists of the following entries: | |
600 | 1) A char representing the file destination (eg: store or cache) |
|
600 | 1) A char representing the file destination (eg: store or cache) | |
601 | 2) A varint containing the length of the filename |
|
601 | 2) A varint containing the length of the filename | |
602 | 3) A varint containing the length of file data |
|
602 | 3) A varint containing the length of file data | |
603 | 4) N bytes containing the filename (the internal, store-agnostic form) |
|
603 | 4) N bytes containing the filename (the internal, store-agnostic form) | |
604 | 5) N bytes containing the file data |
|
604 | 5) N bytes containing the file data | |
605 |
|
605 | |||
606 | Returns a 3-tuple of (file count, file size, data iterator). |
|
606 | Returns a 3-tuple of (file count, file size, data iterator). | |
607 | """ |
|
607 | """ | |
608 |
|
608 | |||
609 | with repo.lock(): |
|
609 | with repo.lock(): | |
610 |
|
610 | |||
611 | entries = [] |
|
611 | entries = [] | |
612 | totalfilesize = 0 |
|
612 | totalfilesize = 0 | |
613 |
|
613 | |||
614 | matcher = None |
|
614 | matcher = None | |
615 | if includes or excludes: |
|
615 | if includes or excludes: | |
616 | matcher = narrowspec.match(repo.root, includes, excludes) |
|
616 | matcher = narrowspec.match(repo.root, includes, excludes) | |
617 |
|
617 | |||
618 | repo.ui.debug(b'scanning\n') |
|
618 | repo.ui.debug(b'scanning\n') | |
619 | for name, ename, size in _walkstreamfiles(repo, matcher): |
|
619 | for rl_type, name, ename, size in _walkstreamfiles(repo, matcher): | |
620 | if size: |
|
620 | if size: | |
621 | entries.append((_srcstore, name, _fileappend, size)) |
|
621 | entries.append((_srcstore, name, _fileappend, size)) | |
622 | totalfilesize += size |
|
622 | totalfilesize += size | |
623 | for name in _walkstreamfullstorefiles(repo): |
|
623 | for name in _walkstreamfullstorefiles(repo): | |
624 | if repo.svfs.exists(name): |
|
624 | if repo.svfs.exists(name): | |
625 | totalfilesize += repo.svfs.lstat(name).st_size |
|
625 | totalfilesize += repo.svfs.lstat(name).st_size | |
626 | entries.append((_srcstore, name, _filefull, None)) |
|
626 | entries.append((_srcstore, name, _filefull, None)) | |
627 | if includeobsmarkers and repo.svfs.exists(b'obsstore'): |
|
627 | if includeobsmarkers and repo.svfs.exists(b'obsstore'): | |
628 | totalfilesize += repo.svfs.lstat(b'obsstore').st_size |
|
628 | totalfilesize += repo.svfs.lstat(b'obsstore').st_size | |
629 | entries.append((_srcstore, b'obsstore', _filefull, None)) |
|
629 | entries.append((_srcstore, b'obsstore', _filefull, None)) | |
630 | for name in cacheutil.cachetocopy(repo): |
|
630 | for name in cacheutil.cachetocopy(repo): | |
631 | if repo.cachevfs.exists(name): |
|
631 | if repo.cachevfs.exists(name): | |
632 | totalfilesize += repo.cachevfs.lstat(name).st_size |
|
632 | totalfilesize += repo.cachevfs.lstat(name).st_size | |
633 | entries.append((_srccache, name, _filefull, None)) |
|
633 | entries.append((_srccache, name, _filefull, None)) | |
634 |
|
634 | |||
635 | chunks = _emit2(repo, entries, totalfilesize) |
|
635 | chunks = _emit2(repo, entries, totalfilesize) | |
636 | first = next(chunks) |
|
636 | first = next(chunks) | |
637 | assert first is None |
|
637 | assert first is None | |
638 |
|
638 | |||
639 | return len(entries), totalfilesize, chunks |
|
639 | return len(entries), totalfilesize, chunks | |
640 |
|
640 | |||
641 |
|
641 | |||
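The entry framing described in the generatev2() docstring is: one source byte, a varint for the name length, a varint for the data length, then the name and the data. The sketch below frames a single entry, assuming the varint is the usual unsigned LEB128 encoding (7 data bits per byte, high bit set on every byte but the last), which is what util.uvarintencode / util.uvarintdecodestream implement:

def uvarint_encode(value):
    # unsigned LEB128: 7 bits per byte, most significant bit marks continuation
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

def encode_v2_entry(src, name, data):
    # src is b's' (store) or b'c' (cache), matching _srcstore / _srccache
    assert len(src) == 1
    return b''.join(
        [src, uvarint_encode(len(name)), uvarint_encode(len(data)), name, data]
    )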
642 | @contextlib.contextmanager |
|
642 | @contextlib.contextmanager | |
643 | def nested(*ctxs): |
|
643 | def nested(*ctxs): | |
644 | this = ctxs[0] |
|
644 | this = ctxs[0] | |
645 | rest = ctxs[1:] |
|
645 | rest = ctxs[1:] | |
646 | with this: |
|
646 | with this: | |
647 | if rest: |
|
647 | if rest: | |
648 | with nested(*rest): |
|
648 | with nested(*rest): | |
649 | yield |
|
649 | yield | |
650 | else: |
|
650 | else: | |
651 | yield |
|
651 | yield | |
652 |
|
652 | |||
653 |
|
653 | |||
654 | def consumev2(repo, fp, filecount, filesize): |
|
654 | def consumev2(repo, fp, filecount, filesize): | |
655 | """Apply the contents from a version 2 streaming clone. |
|
655 | """Apply the contents from a version 2 streaming clone. | |
656 |
|
656 | |||
657 | Data is read from an object that only needs to provide a ``read(size)`` |
|
657 | Data is read from an object that only needs to provide a ``read(size)`` | |
658 | method. |
|
658 | method. | |
659 | """ |
|
659 | """ | |
660 | with repo.lock(): |
|
660 | with repo.lock(): | |
661 | repo.ui.status( |
|
661 | repo.ui.status( | |
662 | _(b'%d files to transfer, %s of data\n') |
|
662 | _(b'%d files to transfer, %s of data\n') | |
663 | % (filecount, util.bytecount(filesize)) |
|
663 | % (filecount, util.bytecount(filesize)) | |
664 | ) |
|
664 | ) | |
665 |
|
665 | |||
666 | start = util.timer() |
|
666 | start = util.timer() | |
667 | progress = repo.ui.makeprogress( |
|
667 | progress = repo.ui.makeprogress( | |
668 | _(b'clone'), total=filesize, unit=_(b'bytes') |
|
668 | _(b'clone'), total=filesize, unit=_(b'bytes') | |
669 | ) |
|
669 | ) | |
670 | progress.update(0) |
|
670 | progress.update(0) | |
671 |
|
671 | |||
672 | vfsmap = _makemap(repo) |
|
672 | vfsmap = _makemap(repo) | |
673 |
|
673 | |||
674 | with repo.transaction(b'clone'): |
|
674 | with repo.transaction(b'clone'): | |
675 | ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values()) |
|
675 | ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values()) | |
676 | with nested(*ctxs): |
|
676 | with nested(*ctxs): | |
677 | for i in range(filecount): |
|
677 | for i in range(filecount): | |
678 | src = util.readexactly(fp, 1) |
|
678 | src = util.readexactly(fp, 1) | |
679 | vfs = vfsmap[src] |
|
679 | vfs = vfsmap[src] | |
680 | namelen = util.uvarintdecodestream(fp) |
|
680 | namelen = util.uvarintdecodestream(fp) | |
681 | datalen = util.uvarintdecodestream(fp) |
|
681 | datalen = util.uvarintdecodestream(fp) | |
682 |
|
682 | |||
683 | name = util.readexactly(fp, namelen) |
|
683 | name = util.readexactly(fp, namelen) | |
684 |
|
684 | |||
685 | if repo.ui.debugflag: |
|
685 | if repo.ui.debugflag: | |
686 | repo.ui.debug( |
|
686 | repo.ui.debug( | |
687 | b'adding [%s] %s (%s)\n' |
|
687 | b'adding [%s] %s (%s)\n' | |
688 | % (src, name, util.bytecount(datalen)) |
|
688 | % (src, name, util.bytecount(datalen)) | |
689 | ) |
|
689 | ) | |
690 |
|
690 | |||
691 | with vfs(name, b'w') as ofp: |
|
691 | with vfs(name, b'w') as ofp: | |
692 | for chunk in util.filechunkiter(fp, limit=datalen): |
|
692 | for chunk in util.filechunkiter(fp, limit=datalen): | |
693 | progress.increment(step=len(chunk)) |
|
693 | progress.increment(step=len(chunk)) | |
694 | ofp.write(chunk) |
|
694 | ofp.write(chunk) | |
695 |
|
695 | |||
696 | # force @filecache properties to be reloaded from |
|
696 | # force @filecache properties to be reloaded from | |
697 | # streamclone-ed file at next access |
|
697 | # streamclone-ed file at next access | |
698 | repo.invalidate(clearfilecache=True) |
|
698 | repo.invalidate(clearfilecache=True) | |
699 |
|
699 | |||
700 | elapsed = util.timer() - start |
|
700 | elapsed = util.timer() - start | |
701 | if elapsed <= 0: |
|
701 | if elapsed <= 0: | |
702 | elapsed = 0.001 |
|
702 | elapsed = 0.001 | |
703 | repo.ui.status( |
|
703 | repo.ui.status( | |
704 | _(b'transferred %s in %.1f seconds (%s/sec)\n') |
|
704 | _(b'transferred %s in %.1f seconds (%s/sec)\n') | |
705 | % ( |
|
705 | % ( | |
706 | util.bytecount(progress.pos), |
|
706 | util.bytecount(progress.pos), | |
707 | elapsed, |
|
707 | elapsed, | |
708 | util.bytecount(progress.pos / elapsed), |
|
708 | util.bytecount(progress.pos / elapsed), | |
709 | ) |
|
709 | ) | |
710 | ) |
|
710 | ) | |
711 | progress.complete() |
|
711 | progress.complete() | |
712 |
|
712 | |||
713 |
|
713 | |||
714 | def applybundlev2(repo, fp, filecount, filesize, requirements): |
|
714 | def applybundlev2(repo, fp, filecount, filesize, requirements): | |
715 | from . import localrepo |
|
715 | from . import localrepo | |
716 |
|
716 | |||
717 | missingreqs = [r for r in requirements if r not in repo.supported] |
|
717 | missingreqs = [r for r in requirements if r not in repo.supported] | |
718 | if missingreqs: |
|
718 | if missingreqs: | |
719 | raise error.Abort( |
|
719 | raise error.Abort( | |
720 | _(b'unable to apply stream clone: unsupported format: %s') |
|
720 | _(b'unable to apply stream clone: unsupported format: %s') | |
721 | % b', '.join(sorted(missingreqs)) |
|
721 | % b', '.join(sorted(missingreqs)) | |
722 | ) |
|
722 | ) | |
723 |
|
723 | |||
724 | consumev2(repo, fp, filecount, filesize) |
|
724 | consumev2(repo, fp, filecount, filesize) | |
725 |
|
725 | |||
726 | # new requirements = old non-format requirements + |
|
726 | # new requirements = old non-format requirements + | |
727 | # new format-related remote requirements |
|
727 | # new format-related remote requirements | |
728 | # requirements from the streamed-in repository |
|
728 | # requirements from the streamed-in repository | |
729 | repo.requirements = set(requirements) | ( |
|
729 | repo.requirements = set(requirements) | ( | |
730 | repo.requirements - repo.supportedformats |
|
730 | repo.requirements - repo.supportedformats | |
731 | ) |
|
731 | ) | |
732 | repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
732 | repo.svfs.options = localrepo.resolvestorevfsoptions( | |
733 | repo.ui, repo.requirements, repo.features |
|
733 | repo.ui, repo.requirements, repo.features | |
734 | ) |
|
734 | ) | |
735 | scmutil.writereporequirements(repo) |
|
735 | scmutil.writereporequirements(repo) |
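To make the requirements bookkeeping at the end of maybeperformlegacystreamclone() and applybundlev2() concrete: format requirements (anything in repo.supportedformats) come from the streamed-in store, while the local repository keeps its non-format requirements. A worked example with purely illustrative sets:

local_reqs = {b'dotencode', b'fncache', b'store', b'revlogv1'}
supported_formats = {b'revlogv1', b'generaldelta', b'sparserevlog'}
streamed_reqs = {b'revlogv1', b'generaldelta'}

# same expression as the repo.requirements update above
new_reqs = set(streamed_reqs) | (local_reqs - supported_formats)
# -> {b'dotencode', b'fncache', b'store', b'revlogv1', b'generaldelta'}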
@@ -1,587 +1,587 | |||||
1 | # upgrade.py - functions for in place upgrade of Mercurial repository |
|
1 | # upgrade.py - functions for in place upgrade of Mercurial repository | |
2 | # |
|
2 | # | |
3 | # Copyright (c) 2016-present, Gregory Szorc |
|
3 | # Copyright (c) 2016-present, Gregory Szorc | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import stat |
|
10 | import stat | |
11 |
|
11 | |||
12 | from ..i18n import _ |
|
12 | from ..i18n import _ | |
13 | from ..pycompat import getattr |
|
13 | from ..pycompat import getattr | |
14 | from .. import ( |
|
14 | from .. import ( | |
15 | changelog, |
|
15 | changelog, | |
16 | error, |
|
16 | error, | |
17 | filelog, |
|
17 | filelog, | |
18 | manifest, |
|
18 | manifest, | |
19 | metadata, |
|
19 | metadata, | |
20 | pycompat, |
|
20 | pycompat, | |
21 | requirements, |
|
21 | requirements, | |
22 | revlog, |
|
22 | revlog, | |
23 | scmutil, |
|
23 | scmutil, | |
24 | util, |
|
24 | util, | |
25 | vfs as vfsmod, |
|
25 | vfs as vfsmod, | |
26 | ) |
|
26 | ) | |
27 | from ..revlogutils import nodemap |
|
27 | from ..revlogutils import nodemap | |
28 |
|
28 | |||
29 |
|
29 | |||
30 | def _revlogfrompath(repo, path): |
|
30 | def _revlogfrompath(repo, path): | |
31 | """Obtain a revlog from a repo path. |
|
31 | """Obtain a revlog from a repo path. | |
32 |
|
32 | |||
33 | An instance of the appropriate class is returned. |
|
33 | An instance of the appropriate class is returned. | |
34 | """ |
|
34 | """ | |
35 | if path == b'00changelog.i': |
|
35 | if path == b'00changelog.i': | |
36 | return changelog.changelog(repo.svfs) |
|
36 | return changelog.changelog(repo.svfs) | |
37 | elif path.endswith(b'00manifest.i'): |
|
37 | elif path.endswith(b'00manifest.i'): | |
38 | mandir = path[: -len(b'00manifest.i')] |
|
38 | mandir = path[: -len(b'00manifest.i')] | |
39 | return manifest.manifestrevlog( |
|
39 | return manifest.manifestrevlog( | |
40 | repo.nodeconstants, repo.svfs, tree=mandir |
|
40 | repo.nodeconstants, repo.svfs, tree=mandir | |
41 | ) |
|
41 | ) | |
42 | else: |
|
42 | else: | |
43 | # reverse of "/".join(("data", path + ".i")) |
|
43 | # reverse of "/".join(("data", path + ".i")) | |
44 | return filelog.filelog(repo.svfs, path[5:-2]) |
|
44 | return filelog.filelog(repo.svfs, path[5:-2]) | |
45 |
|
45 | |||
46 |
|
46 | |||
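As a quick illustration of the mapping above (paths are the unencoded store names, as tracked in fncache); the file path used is only an example:

cl = _revlogfrompath(repo, b'00changelog.i')   # changelog.changelog
mf = _revlogfrompath(repo, b'00manifest.i')    # manifest.manifestrevlog (root tree)
# filelogs are addressed as "data/<path>.i"; the helper strips the
# "data/" prefix and the ".i" suffix before building the filelog
fl = _revlogfrompath(repo, b'data/README.i')   # filelog.filelog for b'README'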
47 | def _copyrevlog(tr, destrepo, oldrl, unencodedname): |
|
47 | def _copyrevlog(tr, destrepo, oldrl, unencodedname): | |
48 | """copy all relevant files for `oldrl` into `destrepo` store |
|
48 | """copy all relevant files for `oldrl` into `destrepo` store | |
49 |
|
49 | |||
50 | Files are copied "as is" without any transformation. The copy is performed |
|
50 | Files are copied "as is" without any transformation. The copy is performed | |
51 | without extra checks. Callers are responsible for making sure the copied |
|
51 | without extra checks. Callers are responsible for making sure the copied | |
52 | content is compatible with format of the destination repository. |
|
52 | content is compatible with format of the destination repository. | |
53 | """ |
|
53 | """ | |
54 | oldrl = getattr(oldrl, '_revlog', oldrl) |
|
54 | oldrl = getattr(oldrl, '_revlog', oldrl) | |
55 | newrl = _revlogfrompath(destrepo, unencodedname) |
|
55 | newrl = _revlogfrompath(destrepo, unencodedname) | |
56 | newrl = getattr(newrl, '_revlog', newrl) |
|
56 | newrl = getattr(newrl, '_revlog', newrl) | |
57 |
|
57 | |||
58 | oldvfs = oldrl.opener |
|
58 | oldvfs = oldrl.opener | |
59 | newvfs = newrl.opener |
|
59 | newvfs = newrl.opener | |
60 | oldindex = oldvfs.join(oldrl.indexfile) |
|
60 | oldindex = oldvfs.join(oldrl.indexfile) | |
61 | newindex = newvfs.join(newrl.indexfile) |
|
61 | newindex = newvfs.join(newrl.indexfile) | |
62 | olddata = oldvfs.join(oldrl.datafile) |
|
62 | olddata = oldvfs.join(oldrl.datafile) | |
63 | newdata = newvfs.join(newrl.datafile) |
|
63 | newdata = newvfs.join(newrl.datafile) | |
64 |
|
64 | |||
65 | with newvfs(newrl.indexfile, b'w'): |
|
65 | with newvfs(newrl.indexfile, b'w'): | |
66 | pass # create all the directories |
|
66 | pass # create all the directories | |
67 |
|
67 | |||
68 | util.copyfile(oldindex, newindex) |
|
68 | util.copyfile(oldindex, newindex) | |
69 | copydata = oldrl.opener.exists(oldrl.datafile) |
|
69 | copydata = oldrl.opener.exists(oldrl.datafile) | |
70 | if copydata: |
|
70 | if copydata: | |
71 | util.copyfile(olddata, newdata) |
|
71 | util.copyfile(olddata, newdata) | |
72 |
|
72 | |||
73 | if not ( |
|
73 | if not ( | |
74 | unencodedname.endswith(b'00changelog.i') |
|
74 | unencodedname.endswith(b'00changelog.i') | |
75 | or unencodedname.endswith(b'00manifest.i') |
|
75 | or unencodedname.endswith(b'00manifest.i') | |
76 | ): |
|
76 | ): | |
77 | destrepo.svfs.fncache.add(unencodedname) |
|
77 | destrepo.svfs.fncache.add(unencodedname) | |
78 | if copydata: |
|
78 | if copydata: | |
79 | destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d') |
|
79 | destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d') | |
80 |
|
80 | |||
81 |
|
81 | |||
82 | UPGRADE_CHANGELOG = b"changelog" |
|
82 | UPGRADE_CHANGELOG = b"changelog" | |
83 | UPGRADE_MANIFEST = b"manifest" |
|
83 | UPGRADE_MANIFEST = b"manifest" | |
84 | UPGRADE_FILELOGS = b"all-filelogs" |
|
84 | UPGRADE_FILELOGS = b"all-filelogs" | |
85 |
|
85 | |||
86 | UPGRADE_ALL_REVLOGS = frozenset( |
|
86 | UPGRADE_ALL_REVLOGS = frozenset( | |
87 | [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS] |
|
87 | [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS] | |
88 | ) |
|
88 | ) | |
89 |
|
89 | |||
90 |
|
90 | |||
91 | def getsidedatacompanion(srcrepo, dstrepo): |
|
91 | def getsidedatacompanion(srcrepo, dstrepo): | |
92 | sidedatacompanion = None |
|
92 | sidedatacompanion = None | |
93 | removedreqs = srcrepo.requirements - dstrepo.requirements |
|
93 | removedreqs = srcrepo.requirements - dstrepo.requirements | |
94 | addedreqs = dstrepo.requirements - srcrepo.requirements |
|
94 | addedreqs = dstrepo.requirements - srcrepo.requirements | |
95 | if requirements.SIDEDATA_REQUIREMENT in removedreqs: |
|
95 | if requirements.SIDEDATA_REQUIREMENT in removedreqs: | |
96 |
|
96 | |||
97 | def sidedatacompanion(rl, rev): |
|
97 | def sidedatacompanion(rl, rev): | |
98 | rl = getattr(rl, '_revlog', rl) |
|
98 | rl = getattr(rl, '_revlog', rl) | |
99 | if rl.flags(rev) & revlog.REVIDX_SIDEDATA: |
|
99 | if rl.flags(rev) & revlog.REVIDX_SIDEDATA: | |
100 | return True, (), {}, 0, 0 |
|
100 | return True, (), {}, 0, 0 | |
101 | return False, (), {}, 0, 0 |
|
101 | return False, (), {}, 0, 0 | |
102 |
|
102 | |||
103 | elif requirements.COPIESSDC_REQUIREMENT in addedreqs: |
|
103 | elif requirements.COPIESSDC_REQUIREMENT in addedreqs: | |
104 | sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo) |
|
104 | sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo) | |
105 | elif requirements.COPIESSDC_REQUIREMENT in removedreqs: |
|
105 | elif requirements.COPIESSDC_REQUIREMENT in removedreqs: | |
106 | sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo) |
|
106 | sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo) | |
107 | return sidedatacompanion |
|
107 | return sidedatacompanion | |
108 |
|
108 | |||
109 |
|
109 | |||
110 | def matchrevlog(revlogfilter, entry): |
|
110 | def matchrevlog(revlogfilter, entry): | |
111 | """check if a revlog is selected for cloning. |
|
111 | """check if a revlog is selected for cloning. | |
112 |
|
112 | |||
113 | In other words, it tells whether any updates need to be done on the revlog 

113 | In other words, it tells whether any updates need to be done on the revlog | 
114 | or whether it can be blindly copied. 

114 | or whether it can be blindly copied. | 
115 |
|
115 | |||
116 | The store entry is checked against the passed filter""" |
|
116 | The store entry is checked against the passed filter""" | |
117 | if entry.endswith(b'00changelog.i'): |
|
117 | if entry.endswith(b'00changelog.i'): | |
118 | return UPGRADE_CHANGELOG in revlogfilter |
|
118 | return UPGRADE_CHANGELOG in revlogfilter | |
119 | elif entry.endswith(b'00manifest.i'): |
|
119 | elif entry.endswith(b'00manifest.i'): | |
120 | return UPGRADE_MANIFEST in revlogfilter |
|
120 | return UPGRADE_MANIFEST in revlogfilter | |
121 | return UPGRADE_FILELOGS in revlogfilter |
|
121 | return UPGRADE_FILELOGS in revlogfilter | |
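
A brief usage sketch (illustrative only; the store entry paths are made up) shows how matchrevlog() and the UPGRADE_* constants above drive the copy-versus-clone decision when only filelog upgrades are requested:

    revlogfilter = {UPGRADE_FILELOGS}
    matchrevlog(revlogfilter, b'00changelog.i')          # False: blindly copied
    matchrevlog(revlogfilter, b'meta/foo/00manifest.i')  # False: blindly copied
    matchrevlog(revlogfilter, b'data/foo.txt.i')         # True: needs a full clone
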
122 |
|
122 | |||
123 |
|
123 | |||
124 | def _perform_clone( |
|
124 | def _perform_clone( | |
125 | ui, |
|
125 | ui, | |
126 | dstrepo, |
|
126 | dstrepo, | |
127 | tr, |
|
127 | tr, | |
128 | old_revlog, |
|
128 | old_revlog, | |
129 | unencoded, |
|
129 | unencoded, | |
130 | upgrade_op, |
|
130 | upgrade_op, | |
131 | sidedatacompanion, |
|
131 | sidedatacompanion, | |
132 | oncopiedrevision, |
|
132 | oncopiedrevision, | |
133 | ): |
|
133 | ): | |
134 | """ returns the new revlog object created""" |
|
134 | """ returns the new revlog object created""" | |
135 | newrl = None |
|
135 | newrl = None | |
136 | if matchrevlog(upgrade_op.revlogs_to_process, unencoded): |
|
136 | if matchrevlog(upgrade_op.revlogs_to_process, unencoded): | |
137 | ui.note( |
|
137 | ui.note( | |
138 | _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded) |
|
138 | _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded) | |
139 | ) |
|
139 | ) | |
140 | newrl = _revlogfrompath(dstrepo, unencoded) |
|
140 | newrl = _revlogfrompath(dstrepo, unencoded) | |
141 | old_revlog.clone( |
|
141 | old_revlog.clone( | |
142 | tr, |
|
142 | tr, | |
143 | newrl, |
|
143 | newrl, | |
144 | addrevisioncb=oncopiedrevision, |
|
144 | addrevisioncb=oncopiedrevision, | |
145 | deltareuse=upgrade_op.delta_reuse_mode, |
|
145 | deltareuse=upgrade_op.delta_reuse_mode, | |
146 | forcedeltabothparents=upgrade_op.force_re_delta_both_parents, |
|
146 | forcedeltabothparents=upgrade_op.force_re_delta_both_parents, | |
147 | sidedatacompanion=sidedatacompanion, |
|
147 | sidedatacompanion=sidedatacompanion, | |
148 | ) |
|
148 | ) | |
149 | else: |
|
149 | else: | |
150 | msg = _(b'blindly copying %s containing %i revisions\n') |
|
150 | msg = _(b'blindly copying %s containing %i revisions\n') | |
151 | ui.note(msg % (unencoded, len(old_revlog))) |
|
151 | ui.note(msg % (unencoded, len(old_revlog))) | |
152 | _copyrevlog(tr, dstrepo, old_revlog, unencoded) |
|
152 | _copyrevlog(tr, dstrepo, old_revlog, unencoded) | |
153 |
|
153 | |||
154 | newrl = _revlogfrompath(dstrepo, unencoded) |
|
154 | newrl = _revlogfrompath(dstrepo, unencoded) | |
155 | return newrl |
|
155 | return newrl | |
156 |
|
156 | |||
157 |
|
157 | |||
158 | def _clonerevlogs( |
|
158 | def _clonerevlogs( | |
159 | ui, |
|
159 | ui, | |
160 | srcrepo, |
|
160 | srcrepo, | |
161 | dstrepo, |
|
161 | dstrepo, | |
162 | tr, |
|
162 | tr, | |
163 | upgrade_op, |
|
163 | upgrade_op, | |
164 | ): |
|
164 | ): | |
165 | """Copy revlogs between 2 repos.""" |
|
165 | """Copy revlogs between 2 repos.""" | |
166 | revcount = 0 |
|
166 | revcount = 0 | |
167 | srcsize = 0 |
|
167 | srcsize = 0 | |
168 | srcrawsize = 0 |
|
168 | srcrawsize = 0 | |
169 | dstsize = 0 |
|
169 | dstsize = 0 | |
170 | fcount = 0 |
|
170 | fcount = 0 | |
171 | frevcount = 0 |
|
171 | frevcount = 0 | |
172 | fsrcsize = 0 |
|
172 | fsrcsize = 0 | |
173 | frawsize = 0 |
|
173 | frawsize = 0 | |
174 | fdstsize = 0 |
|
174 | fdstsize = 0 | |
175 | mcount = 0 |
|
175 | mcount = 0 | |
176 | mrevcount = 0 |
|
176 | mrevcount = 0 | |
177 | msrcsize = 0 |
|
177 | msrcsize = 0 | |
178 | mrawsize = 0 |
|
178 | mrawsize = 0 | |
179 | mdstsize = 0 |
|
179 | mdstsize = 0 | |
180 | crevcount = 0 |
|
180 | crevcount = 0 | |
181 | csrcsize = 0 |
|
181 | csrcsize = 0 | |
182 | crawsize = 0 |
|
182 | crawsize = 0 | |
183 | cdstsize = 0 |
|
183 | cdstsize = 0 | |
184 |
|
184 | |||
185 | alldatafiles = list(srcrepo.store.walk()) |
|
185 | alldatafiles = list(srcrepo.store.walk()) | |
186 | # mapping of data files which need to be cloned 

186 | # mapping of data files which need to be cloned | 
187 | # key is unencoded filename |
|
187 | # key is unencoded filename | |
188 | # value is revlog_object_from_srcrepo |
|
188 | # value is revlog_object_from_srcrepo | |
189 | manifests = {} |
|
189 | manifests = {} | |
190 | changelogs = {} |
|
190 | changelogs = {} | |
191 | filelogs = {} |
|
191 | filelogs = {} | |
192 |
|
192 | |||
193 | # Perform a pass to collect metadata. This validates we can open all |
|
193 | # Perform a pass to collect metadata. This validates we can open all | |
194 | # source files and allows a unified progress bar to be displayed. |
|
194 | # source files and allows a unified progress bar to be displayed. | |
195 | for unencoded, encoded, size in alldatafiles: |
|
195 | for revlog_type, unencoded, encoded, size in alldatafiles: | |
196 | if not unencoded.endswith(b'.i'): |
|
196 | if not unencoded.endswith(b'.i'): | |
197 | continue |
|
197 | continue | |
198 |
|
198 | |||
199 | rl = _revlogfrompath(srcrepo, unencoded) |
|
199 | rl = _revlogfrompath(srcrepo, unencoded) | |
200 |
|
200 | |||
201 | info = rl.storageinfo( |
|
201 | info = rl.storageinfo( | |
202 | exclusivefiles=True, |
|
202 | exclusivefiles=True, | |
203 | revisionscount=True, |
|
203 | revisionscount=True, | |
204 | trackedsize=True, |
|
204 | trackedsize=True, | |
205 | storedsize=True, |
|
205 | storedsize=True, | |
206 | ) |
|
206 | ) | |
207 |
|
207 | |||
208 | revcount += info[b'revisionscount'] or 0 |
|
208 | revcount += info[b'revisionscount'] or 0 | |
209 | datasize = info[b'storedsize'] or 0 |
|
209 | datasize = info[b'storedsize'] or 0 | |
210 | rawsize = info[b'trackedsize'] or 0 |
|
210 | rawsize = info[b'trackedsize'] or 0 | |
211 |
|
211 | |||
212 | srcsize += datasize |
|
212 | srcsize += datasize | |
213 | srcrawsize += rawsize |
|
213 | srcrawsize += rawsize | |
214 |
|
214 | |||
215 | # This is for the separate progress bars. |
|
215 | # This is for the separate progress bars. | |
216 | if isinstance(rl, changelog.changelog): |
|
216 | if isinstance(rl, changelog.changelog): | |
217 | changelogs[unencoded] = rl |
|
217 | changelogs[unencoded] = rl | |
218 | crevcount += len(rl) |
|
218 | crevcount += len(rl) | |
219 | csrcsize += datasize |
|
219 | csrcsize += datasize | |
220 | crawsize += rawsize |
|
220 | crawsize += rawsize | |
221 | elif isinstance(rl, manifest.manifestrevlog): |
|
221 | elif isinstance(rl, manifest.manifestrevlog): | |
222 | manifests[unencoded] = rl |
|
222 | manifests[unencoded] = rl | |
223 | mcount += 1 |
|
223 | mcount += 1 | |
224 | mrevcount += len(rl) |
|
224 | mrevcount += len(rl) | |
225 | msrcsize += datasize |
|
225 | msrcsize += datasize | |
226 | mrawsize += rawsize |
|
226 | mrawsize += rawsize | |
227 | elif isinstance(rl, filelog.filelog): |
|
227 | elif isinstance(rl, filelog.filelog): | |
228 | filelogs[unencoded] = rl |
|
228 | filelogs[unencoded] = rl | |
229 | fcount += 1 |
|
229 | fcount += 1 | |
230 | frevcount += len(rl) |
|
230 | frevcount += len(rl) | |
231 | fsrcsize += datasize |
|
231 | fsrcsize += datasize | |
232 | frawsize += rawsize |
|
232 | frawsize += rawsize | |
233 | else: |
|
233 | else: | |
234 | raise error.ProgrammingError(b'unknown revlog type') 

234 | raise error.ProgrammingError(b'unknown revlog type') | 
235 |
|
235 | |||
236 | if not revcount: |
|
236 | if not revcount: | |
237 | return |
|
237 | return | |
238 |
|
238 | |||
239 | ui.status( |
|
239 | ui.status( | |
240 | _( |
|
240 | _( | |
241 | b'migrating %d total revisions (%d in filelogs, %d in manifests, ' |
|
241 | b'migrating %d total revisions (%d in filelogs, %d in manifests, ' | |
242 | b'%d in changelog)\n' |
|
242 | b'%d in changelog)\n' | |
243 | ) |
|
243 | ) | |
244 | % (revcount, frevcount, mrevcount, crevcount) |
|
244 | % (revcount, frevcount, mrevcount, crevcount) | |
245 | ) |
|
245 | ) | |
246 | ui.status( |
|
246 | ui.status( | |
247 | _(b'migrating %s in store; %s tracked data\n') |
|
247 | _(b'migrating %s in store; %s tracked data\n') | |
248 | % ((util.bytecount(srcsize), util.bytecount(srcrawsize))) |
|
248 | % ((util.bytecount(srcsize), util.bytecount(srcrawsize))) | |
249 | ) |
|
249 | ) | |
250 |
|
250 | |||
251 | # Used to keep track of progress. |
|
251 | # Used to keep track of progress. | |
252 | progress = None |
|
252 | progress = None | |
253 |
|
253 | |||
254 | def oncopiedrevision(rl, rev, node): |
|
254 | def oncopiedrevision(rl, rev, node): | |
255 | progress.increment() |
|
255 | progress.increment() | |
256 |
|
256 | |||
257 | sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo) |
|
257 | sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo) | |
258 |
|
258 | |||
259 | # Migrating filelogs |
|
259 | # Migrating filelogs | |
260 | ui.status( |
|
260 | ui.status( | |
261 | _( |
|
261 | _( | |
262 | b'migrating %d filelogs containing %d revisions ' |
|
262 | b'migrating %d filelogs containing %d revisions ' | |
263 | b'(%s in store; %s tracked data)\n' |
|
263 | b'(%s in store; %s tracked data)\n' | |
264 | ) |
|
264 | ) | |
265 | % ( |
|
265 | % ( | |
266 | fcount, |
|
266 | fcount, | |
267 | frevcount, |
|
267 | frevcount, | |
268 | util.bytecount(fsrcsize), |
|
268 | util.bytecount(fsrcsize), | |
269 | util.bytecount(frawsize), |
|
269 | util.bytecount(frawsize), | |
270 | ) |
|
270 | ) | |
271 | ) |
|
271 | ) | |
272 | progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount) |
|
272 | progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount) | |
273 | for unencoded, oldrl in sorted(filelogs.items()): |
|
273 | for unencoded, oldrl in sorted(filelogs.items()): | |
274 | newrl = _perform_clone( |
|
274 | newrl = _perform_clone( | |
275 | ui, |
|
275 | ui, | |
276 | dstrepo, |
|
276 | dstrepo, | |
277 | tr, |
|
277 | tr, | |
278 | oldrl, |
|
278 | oldrl, | |
279 | unencoded, |
|
279 | unencoded, | |
280 | upgrade_op, |
|
280 | upgrade_op, | |
281 | sidedatacompanion, |
|
281 | sidedatacompanion, | |
282 | oncopiedrevision, |
|
282 | oncopiedrevision, | |
283 | ) |
|
283 | ) | |
284 | info = newrl.storageinfo(storedsize=True) |
|
284 | info = newrl.storageinfo(storedsize=True) | |
285 | fdstsize += info[b'storedsize'] or 0 |
|
285 | fdstsize += info[b'storedsize'] or 0 | |
286 | ui.status( |
|
286 | ui.status( | |
287 | _( |
|
287 | _( | |
288 | b'finished migrating %d filelog revisions across %d ' |
|
288 | b'finished migrating %d filelog revisions across %d ' | |
289 | b'filelogs; change in size: %s\n' |
|
289 | b'filelogs; change in size: %s\n' | |
290 | ) |
|
290 | ) | |
291 | % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) |
|
291 | % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) | |
292 | ) |
|
292 | ) | |
293 |
|
293 | |||
294 | # Migrating manifests |
|
294 | # Migrating manifests | |
295 | ui.status( |
|
295 | ui.status( | |
296 | _( |
|
296 | _( | |
297 | b'migrating %d manifests containing %d revisions ' |
|
297 | b'migrating %d manifests containing %d revisions ' | |
298 | b'(%s in store; %s tracked data)\n' |
|
298 | b'(%s in store; %s tracked data)\n' | |
299 | ) |
|
299 | ) | |
300 | % ( |
|
300 | % ( | |
301 | mcount, |
|
301 | mcount, | |
302 | mrevcount, |
|
302 | mrevcount, | |
303 | util.bytecount(msrcsize), |
|
303 | util.bytecount(msrcsize), | |
304 | util.bytecount(mrawsize), |
|
304 | util.bytecount(mrawsize), | |
305 | ) |
|
305 | ) | |
306 | ) |
|
306 | ) | |
307 | if progress: |
|
307 | if progress: | |
308 | progress.complete() |
|
308 | progress.complete() | |
309 | progress = srcrepo.ui.makeprogress( |
|
309 | progress = srcrepo.ui.makeprogress( | |
310 | _(b'manifest revisions'), total=mrevcount |
|
310 | _(b'manifest revisions'), total=mrevcount | |
311 | ) |
|
311 | ) | |
312 | for unencoded, oldrl in sorted(manifests.items()): |
|
312 | for unencoded, oldrl in sorted(manifests.items()): | |
313 | newrl = _perform_clone( |
|
313 | newrl = _perform_clone( | |
314 | ui, |
|
314 | ui, | |
315 | dstrepo, |
|
315 | dstrepo, | |
316 | tr, |
|
316 | tr, | |
317 | oldrl, |
|
317 | oldrl, | |
318 | unencoded, |
|
318 | unencoded, | |
319 | upgrade_op, |
|
319 | upgrade_op, | |
320 | sidedatacompanion, |
|
320 | sidedatacompanion, | |
321 | oncopiedrevision, |
|
321 | oncopiedrevision, | |
322 | ) |
|
322 | ) | |
323 | info = newrl.storageinfo(storedsize=True) |
|
323 | info = newrl.storageinfo(storedsize=True) | |
324 | mdstsize += info[b'storedsize'] or 0 |
|
324 | mdstsize += info[b'storedsize'] or 0 | |
325 | ui.status( |
|
325 | ui.status( | |
326 | _( |
|
326 | _( | |
327 | b'finished migrating %d manifest revisions across %d ' |
|
327 | b'finished migrating %d manifest revisions across %d ' | |
328 | b'manifests; change in size: %s\n' |
|
328 | b'manifests; change in size: %s\n' | |
329 | ) |
|
329 | ) | |
330 | % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) |
|
330 | % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) | |
331 | ) |
|
331 | ) | |
332 |
|
332 | |||
333 | # Migrating changelog |
|
333 | # Migrating changelog | |
334 | ui.status( |
|
334 | ui.status( | |
335 | _( |
|
335 | _( | |
336 | b'migrating changelog containing %d revisions ' |
|
336 | b'migrating changelog containing %d revisions ' | |
337 | b'(%s in store; %s tracked data)\n' |
|
337 | b'(%s in store; %s tracked data)\n' | |
338 | ) |
|
338 | ) | |
339 | % ( |
|
339 | % ( | |
340 | crevcount, |
|
340 | crevcount, | |
341 | util.bytecount(csrcsize), |
|
341 | util.bytecount(csrcsize), | |
342 | util.bytecount(crawsize), |
|
342 | util.bytecount(crawsize), | |
343 | ) |
|
343 | ) | |
344 | ) |
|
344 | ) | |
345 | if progress: |
|
345 | if progress: | |
346 | progress.complete() |
|
346 | progress.complete() | |
347 | progress = srcrepo.ui.makeprogress( |
|
347 | progress = srcrepo.ui.makeprogress( | |
348 | _(b'changelog revisions'), total=crevcount |
|
348 | _(b'changelog revisions'), total=crevcount | |
349 | ) |
|
349 | ) | |
350 | for unencoded, oldrl in sorted(changelogs.items()): |
|
350 | for unencoded, oldrl in sorted(changelogs.items()): | |
351 | newrl = _perform_clone( |
|
351 | newrl = _perform_clone( | |
352 | ui, |
|
352 | ui, | |
353 | dstrepo, |
|
353 | dstrepo, | |
354 | tr, |
|
354 | tr, | |
355 | oldrl, |
|
355 | oldrl, | |
356 | unencoded, |
|
356 | unencoded, | |
357 | upgrade_op, |
|
357 | upgrade_op, | |
358 | sidedatacompanion, |
|
358 | sidedatacompanion, | |
359 | oncopiedrevision, |
|
359 | oncopiedrevision, | |
360 | ) |
|
360 | ) | |
361 | info = newrl.storageinfo(storedsize=True) |
|
361 | info = newrl.storageinfo(storedsize=True) | |
362 | cdstsize += info[b'storedsize'] or 0 |
|
362 | cdstsize += info[b'storedsize'] or 0 | |
363 | progress.complete() |
|
363 | progress.complete() | |
364 | ui.status( |
|
364 | ui.status( | |
365 | _( |
|
365 | _( | |
366 | b'finished migrating %d changelog revisions; change in size: ' |
|
366 | b'finished migrating %d changelog revisions; change in size: ' | |
367 | b'%s\n' |
|
367 | b'%s\n' | |
368 | ) |
|
368 | ) | |
369 | % (crevcount, util.bytecount(cdstsize - csrcsize)) |
|
369 | % (crevcount, util.bytecount(cdstsize - csrcsize)) | |
370 | ) |
|
370 | ) | |
371 |
|
371 | |||
372 | dstsize = fdstsize + mdstsize + cdstsize |
|
372 | dstsize = fdstsize + mdstsize + cdstsize | |
373 | ui.status( |
|
373 | ui.status( | |
374 | _( |
|
374 | _( | |
375 | b'finished migrating %d total revisions; total change in store ' |
|
375 | b'finished migrating %d total revisions; total change in store ' | |
376 | b'size: %s\n' |
|
376 | b'size: %s\n' | |
377 | ) |
|
377 | ) | |
378 | % (revcount, util.bytecount(dstsize - srcsize)) |
|
378 | % (revcount, util.bytecount(dstsize - srcsize)) | |
379 | ) |
|
379 | ) | |
380 |
|
380 | |||
381 |
|
381 | |||
382 | def _files_to_copy_post_revlog_clone(srcrepo): |
|
382 | def _files_to_copy_post_revlog_clone(srcrepo): | |
383 | """yields files which should be copied to destination after revlogs |
|
383 | """yields files which should be copied to destination after revlogs | |
384 | are cloned""" |
|
384 | are cloned""" | |
385 | for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)): |
|
385 | for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)): | |
386 | # don't copy revlogs as they are already cloned |
|
386 | # don't copy revlogs as they are already cloned | |
387 | if path.endswith((b'.i', b'.d', b'.n', b'.nd')): |
|
387 | if path.endswith((b'.i', b'.d', b'.n', b'.nd')): | |
388 | continue |
|
388 | continue | |
389 | # Skip transaction related files. |
|
389 | # Skip transaction related files. | |
390 | if path.startswith(b'undo'): |
|
390 | if path.startswith(b'undo'): | |
391 | continue |
|
391 | continue | |
392 | # Only copy regular files. |
|
392 | # Only copy regular files. | |
393 | if kind != stat.S_IFREG: |
|
393 | if kind != stat.S_IFREG: | |
394 | continue |
|
394 | continue | |
395 | # Skip other skipped files. |
|
395 | # Skip other skipped files. | |
396 | if path in (b'lock', b'fncache'): |
|
396 | if path in (b'lock', b'fncache'): | |
397 | continue |
|
397 | continue | |
398 | # TODO: should we skip cache too? |
|
398 | # TODO: should we skip cache too? | |
399 |
|
399 | |||
400 | yield path |
|
400 | yield path | |
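
Applied to a plain list of store entry names (made-up names, and leaving aside the regular-file check on the stat results), the same filtering rules keep only the non-revlog, non-transaction files:

    names = [b'00changelog.i', b'00changelog.d', b'phaseroots',
             b'undo.backupfiles', b'fncache', b'obsstore']
    kept = [
        n
        for n in names
        if not n.endswith((b'.i', b'.d', b'.n', b'.nd'))
        and not n.startswith(b'undo')
        and n not in (b'lock', b'fncache')
    ]
    # kept == [b'phaseroots', b'obsstore']
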
401 |
|
401 | |||
402 |
|
402 | |||
403 | def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op): |
|
403 | def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op): | |
404 | """Replace the stores after current repository is upgraded |
|
404 | """Replace the stores after current repository is upgraded | |
405 |
|
405 | |||
406 | Creates a backup of the current repository store at the backup path 

406 | Creates a backup of the current repository store at the backup path | 
407 | Replaces the store files in the current repo with the upgraded ones 

407 | Replaces the store files in the current repo with the upgraded ones | 
408 |
|
408 | |||
409 | Arguments: |
|
409 | Arguments: | |
410 | currentrepo: repo object of current repository |
|
410 | currentrepo: repo object of current repository | |
411 | upgradedrepo: repo object of the upgraded data |
|
411 | upgradedrepo: repo object of the upgraded data | |
412 | backupvfs: vfs object for the backup path |
|
412 | backupvfs: vfs object for the backup path | |
413 | upgrade_op: upgrade operation object |
|
413 | upgrade_op: upgrade operation object | |
414 | to be used to decide what is upgraded 

414 | to be used to decide what is upgraded | 
415 | """ |
|
415 | """ | |
416 | # TODO: don't blindly rename everything in store |
|
416 | # TODO: don't blindly rename everything in store | |
417 | # There can be upgrades where store is not touched at all |
|
417 | # There can be upgrades where store is not touched at all | |
418 | if upgrade_op.backup_store: |
|
418 | if upgrade_op.backup_store: | |
419 | util.rename(currentrepo.spath, backupvfs.join(b'store')) |
|
419 | util.rename(currentrepo.spath, backupvfs.join(b'store')) | |
420 | else: |
|
420 | else: | |
421 | currentrepo.vfs.rmtree(b'store', forcibly=True) |
|
421 | currentrepo.vfs.rmtree(b'store', forcibly=True) | |
422 | util.rename(upgradedrepo.spath, currentrepo.spath) |
|
422 | util.rename(upgradedrepo.spath, currentrepo.spath) | |
423 |
|
423 | |||
424 |
|
424 | |||
425 | def finishdatamigration(ui, srcrepo, dstrepo, requirements): |
|
425 | def finishdatamigration(ui, srcrepo, dstrepo, requirements): | |
426 | """Hook point for extensions to perform additional actions during upgrade. |
|
426 | """Hook point for extensions to perform additional actions during upgrade. | |
427 |
|
427 | |||
428 | This function is called after revlogs and store files have been copied but |
|
428 | This function is called after revlogs and store files have been copied but | |
429 | before the new store is swapped into the original location. |
|
429 | before the new store is swapped into the original location. | |
430 | """ |
|
430 | """ | |
431 |
|
431 | |||
432 |
|
432 | |||
433 | def upgrade(ui, srcrepo, dstrepo, upgrade_op): |
|
433 | def upgrade(ui, srcrepo, dstrepo, upgrade_op): | |
434 | """Do the low-level work of upgrading a repository. |
|
434 | """Do the low-level work of upgrading a repository. | |
435 |
|
435 | |||
436 | The upgrade is effectively performed as a copy between a source |
|
436 | The upgrade is effectively performed as a copy between a source | |
437 | repository and a temporary destination repository. |
|
437 | repository and a temporary destination repository. | |
438 |
|
438 | |||
439 | The source repository is unmodified for as long as possible so the |
|
439 | The source repository is unmodified for as long as possible so the | |
440 | upgrade can abort at any time without causing loss of service for |
|
440 | upgrade can abort at any time without causing loss of service for | |
441 | readers and without corrupting the source repository. |
|
441 | readers and without corrupting the source repository. | |
442 | """ |
|
442 | """ | |
443 | assert srcrepo.currentwlock() |
|
443 | assert srcrepo.currentwlock() | |
444 | assert dstrepo.currentwlock() |
|
444 | assert dstrepo.currentwlock() | |
445 | backuppath = None |
|
445 | backuppath = None | |
446 | backupvfs = None |
|
446 | backupvfs = None | |
447 |
|
447 | |||
448 | ui.status( |
|
448 | ui.status( | |
449 | _( |
|
449 | _( | |
450 | b'(it is safe to interrupt this process any time before ' |
|
450 | b'(it is safe to interrupt this process any time before ' | |
451 | b'data migration completes)\n' |
|
451 | b'data migration completes)\n' | |
452 | ) |
|
452 | ) | |
453 | ) |
|
453 | ) | |
454 |
|
454 | |||
455 | if upgrade_op.requirements_only: |
|
455 | if upgrade_op.requirements_only: | |
456 | ui.status(_(b'upgrading repository requirements\n')) |
|
456 | ui.status(_(b'upgrading repository requirements\n')) | |
457 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
457 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
458 | # if there is only one action and that is persistent nodemap upgrade |
|
458 | # if there is only one action and that is persistent nodemap upgrade | |
459 | # directly write the nodemap file and update requirements instead of going |
|
459 | # directly write the nodemap file and update requirements instead of going | |
460 | # through the whole cloning process |
|
460 | # through the whole cloning process | |
461 | elif ( |
|
461 | elif ( | |
462 | len(upgrade_op.upgrade_actions) == 1 |
|
462 | len(upgrade_op.upgrade_actions) == 1 | |
463 | and b'persistent-nodemap' in upgrade_op._upgrade_actions_names |
|
463 | and b'persistent-nodemap' in upgrade_op._upgrade_actions_names | |
464 | and not upgrade_op.removed_actions |
|
464 | and not upgrade_op.removed_actions | |
465 | ): |
|
465 | ): | |
466 | ui.status( |
|
466 | ui.status( | |
467 | _(b'upgrading repository to use persistent nodemap feature\n') |
|
467 | _(b'upgrading repository to use persistent nodemap feature\n') | |
468 | ) |
|
468 | ) | |
469 | with srcrepo.transaction(b'upgrade') as tr: |
|
469 | with srcrepo.transaction(b'upgrade') as tr: | |
470 | unfi = srcrepo.unfiltered() |
|
470 | unfi = srcrepo.unfiltered() | |
471 | cl = unfi.changelog |
|
471 | cl = unfi.changelog | |
472 | nodemap.persist_nodemap(tr, cl, force=True) |
|
472 | nodemap.persist_nodemap(tr, cl, force=True) | |
473 | # we want to directly operate on the underlying revlog to force |
|
473 | # we want to directly operate on the underlying revlog to force | |
474 | # create a nodemap file. This is fine since this is upgrade code |
|
474 | # create a nodemap file. This is fine since this is upgrade code | |
475 | # and it heavily relies on repository being revlog based |
|
475 | # and it heavily relies on repository being revlog based | |
476 | # hence accessing private attributes can be justified |
|
476 | # hence accessing private attributes can be justified | |
477 | nodemap.persist_nodemap( |
|
477 | nodemap.persist_nodemap( | |
478 | tr, unfi.manifestlog._rootstore._revlog, force=True |
|
478 | tr, unfi.manifestlog._rootstore._revlog, force=True | |
479 | ) |
|
479 | ) | |
480 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
480 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
481 | elif ( |
|
481 | elif ( | |
482 | len(upgrade_op.removed_actions) == 1 |
|
482 | len(upgrade_op.removed_actions) == 1 | |
483 | and [ |
|
483 | and [ | |
484 | x |
|
484 | x | |
485 | for x in upgrade_op.removed_actions |
|
485 | for x in upgrade_op.removed_actions | |
486 | if x.name == b'persistent-nodemap' |
|
486 | if x.name == b'persistent-nodemap' | |
487 | ] |
|
487 | ] | |
488 | and not upgrade_op.upgrade_actions |
|
488 | and not upgrade_op.upgrade_actions | |
489 | ): |
|
489 | ): | |
490 | ui.status( |
|
490 | ui.status( | |
491 | _(b'downgrading repository to not use persistent nodemap feature\n') |
|
491 | _(b'downgrading repository to not use persistent nodemap feature\n') | |
492 | ) |
|
492 | ) | |
493 | with srcrepo.transaction(b'upgrade') as tr: |
|
493 | with srcrepo.transaction(b'upgrade') as tr: | |
494 | unfi = srcrepo.unfiltered() |
|
494 | unfi = srcrepo.unfiltered() | |
495 | cl = unfi.changelog |
|
495 | cl = unfi.changelog | |
496 | nodemap.delete_nodemap(tr, srcrepo, cl) |
|
496 | nodemap.delete_nodemap(tr, srcrepo, cl) | |
497 | # check comment 20 lines above for accessing private attributes |
|
497 | # check comment 20 lines above for accessing private attributes | |
498 | nodemap.delete_nodemap( |
|
498 | nodemap.delete_nodemap( | |
499 | tr, srcrepo, unfi.manifestlog._rootstore._revlog |
|
499 | tr, srcrepo, unfi.manifestlog._rootstore._revlog | |
500 | ) |
|
500 | ) | |
501 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
501 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
502 | else: |
|
502 | else: | |
503 | with dstrepo.transaction(b'upgrade') as tr: |
|
503 | with dstrepo.transaction(b'upgrade') as tr: | |
504 | _clonerevlogs( |
|
504 | _clonerevlogs( | |
505 | ui, |
|
505 | ui, | |
506 | srcrepo, |
|
506 | srcrepo, | |
507 | dstrepo, |
|
507 | dstrepo, | |
508 | tr, |
|
508 | tr, | |
509 | upgrade_op, |
|
509 | upgrade_op, | |
510 | ) |
|
510 | ) | |
511 |
|
511 | |||
512 | # Now copy other files in the store directory. |
|
512 | # Now copy other files in the store directory. | |
513 | for p in _files_to_copy_post_revlog_clone(srcrepo): |
|
513 | for p in _files_to_copy_post_revlog_clone(srcrepo): | |
514 | srcrepo.ui.status(_(b'copying %s\n') % p) |
|
514 | srcrepo.ui.status(_(b'copying %s\n') % p) | |
515 | src = srcrepo.store.rawvfs.join(p) |
|
515 | src = srcrepo.store.rawvfs.join(p) | |
516 | dst = dstrepo.store.rawvfs.join(p) |
|
516 | dst = dstrepo.store.rawvfs.join(p) | |
517 | util.copyfile(src, dst, copystat=True) |
|
517 | util.copyfile(src, dst, copystat=True) | |
518 |
|
518 | |||
519 | finishdatamigration(ui, srcrepo, dstrepo, requirements) |
|
519 | finishdatamigration(ui, srcrepo, dstrepo, requirements) | |
520 |
|
520 | |||
521 | ui.status(_(b'data fully upgraded in a temporary repository\n')) |
|
521 | ui.status(_(b'data fully upgraded in a temporary repository\n')) | |
522 |
|
522 | |||
523 | if upgrade_op.backup_store: |
|
523 | if upgrade_op.backup_store: | |
524 | backuppath = pycompat.mkdtemp( |
|
524 | backuppath = pycompat.mkdtemp( | |
525 | prefix=b'upgradebackup.', dir=srcrepo.path |
|
525 | prefix=b'upgradebackup.', dir=srcrepo.path | |
526 | ) |
|
526 | ) | |
527 | backupvfs = vfsmod.vfs(backuppath) |
|
527 | backupvfs = vfsmod.vfs(backuppath) | |
528 |
|
528 | |||
529 | # Make a backup of requires file first, as it is the first to be modified. |
|
529 | # Make a backup of requires file first, as it is the first to be modified. | |
530 | util.copyfile( |
|
530 | util.copyfile( | |
531 | srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires') |
|
531 | srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires') | |
532 | ) |
|
532 | ) | |
533 |
|
533 | |||
534 | # We install an arbitrary requirement that clients must not support |
|
534 | # We install an arbitrary requirement that clients must not support | |
535 | # as a mechanism to lock out new clients during the data swap. This is |
|
535 | # as a mechanism to lock out new clients during the data swap. This is | |
536 | # better than allowing a client to continue while the repository is in |
|
536 | # better than allowing a client to continue while the repository is in | |
537 | # an inconsistent state. |
|
537 | # an inconsistent state. | |
538 | ui.status( |
|
538 | ui.status( | |
539 | _( |
|
539 | _( | |
540 | b'marking source repository as being upgraded; clients will be ' |
|
540 | b'marking source repository as being upgraded; clients will be ' | |
541 | b'unable to read from repository\n' |
|
541 | b'unable to read from repository\n' | |
542 | ) |
|
542 | ) | |
543 | ) |
|
543 | ) | |
544 | scmutil.writereporequirements( |
|
544 | scmutil.writereporequirements( | |
545 | srcrepo, srcrepo.requirements | {b'upgradeinprogress'} |
|
545 | srcrepo, srcrepo.requirements | {b'upgradeinprogress'} | |
546 | ) |
|
546 | ) | |
547 |
|
547 | |||
548 | ui.status(_(b'starting in-place swap of repository data\n')) |
|
548 | ui.status(_(b'starting in-place swap of repository data\n')) | |
549 | if upgrade_op.backup_store: |
|
549 | if upgrade_op.backup_store: | |
550 | ui.status( |
|
550 | ui.status( | |
551 | _(b'replaced files will be backed up at %s\n') % backuppath |
|
551 | _(b'replaced files will be backed up at %s\n') % backuppath | |
552 | ) |
|
552 | ) | |
553 |
|
553 | |||
554 | # Now swap in the new store directory. Doing it as a rename should make |
|
554 | # Now swap in the new store directory. Doing it as a rename should make | |
555 | # the operation nearly instantaneous and atomic (at least in well-behaved |
|
555 | # the operation nearly instantaneous and atomic (at least in well-behaved | |
556 | # environments). |
|
556 | # environments). | |
557 | ui.status(_(b'replacing store...\n')) |
|
557 | ui.status(_(b'replacing store...\n')) | |
558 | tstart = util.timer() |
|
558 | tstart = util.timer() | |
559 | _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op) |
|
559 | _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op) | |
560 | elapsed = util.timer() - tstart |
|
560 | elapsed = util.timer() - tstart | |
561 | ui.status( |
|
561 | ui.status( | |
562 | _( |
|
562 | _( | |
563 | b'store replacement complete; repository was inconsistent for ' |
|
563 | b'store replacement complete; repository was inconsistent for ' | |
564 | b'%0.1fs\n' |
|
564 | b'%0.1fs\n' | |
565 | ) |
|
565 | ) | |
566 | % elapsed |
|
566 | % elapsed | |
567 | ) |
|
567 | ) | |
568 |
|
568 | |||
569 | # We first write the requirements file. Any new requirements will lock |
|
569 | # We first write the requirements file. Any new requirements will lock | |
570 | # out legacy clients. |
|
570 | # out legacy clients. | |
571 | ui.status( |
|
571 | ui.status( | |
572 | _( |
|
572 | _( | |
573 | b'finalizing requirements file and making repository readable ' |
|
573 | b'finalizing requirements file and making repository readable ' | |
574 | b'again\n' |
|
574 | b'again\n' | |
575 | ) |
|
575 | ) | |
576 | ) |
|
576 | ) | |
577 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
577 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
578 |
|
578 | |||
579 | if upgrade_op.backup_store: |
|
579 | if upgrade_op.backup_store: | |
580 | # The lock file from the old store won't be removed because nothing has a |
|
580 | # The lock file from the old store won't be removed because nothing has a | |
581 | # reference to its new location. So clean it up manually. Alternatively, we |
|
581 | # reference to its new location. So clean it up manually. Alternatively, we | |
582 | # could update srcrepo.svfs and other variables to point to the new |
|
582 | # could update srcrepo.svfs and other variables to point to the new | |
583 | # location. This is simpler. |
|
583 | # location. This is simpler. | |
584 | assert backupvfs is not None # help pytype |
|
584 | assert backupvfs is not None # help pytype | |
585 | backupvfs.unlink(b'store/lock') |
|
585 | backupvfs.unlink(b'store/lock') | |
586 |
|
586 | |||
587 | return backuppath |
|
587 | return backuppath |
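
The upgrade() function above relies on the fact that a repository's requires file is a plain list of requirement names, one per line, and that clients refuse to open a repository whose requires file contains a name they do not recognize. A minimal standalone sketch of that lock-out trick, with a hypothetical helper name and simplified path handling, could look like this:

    # Hypothetical helper, not part of the changeset: append an unknown
    # requirement so that clients refuse to open the repository until the
    # store swap completes and the entry is rewritten.
    def mark_upgrade_in_progress(requires_path):
        with open(requires_path, 'r+') as fp:
            entries = set(fp.read().splitlines())
            entries.add('upgradeinprogress')  # unrecognized by clients
            fp.seek(0)
            fp.write('\n'.join(sorted(entries)) + '\n')
            fp.truncate()
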
@@ -1,632 +1,632 | |||||
1 | # verify.py - repository integrity checking for Mercurial |
|
1 | # verify.py - repository integrity checking for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com> |
|
3 | # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import os |
|
10 | import os | |
11 |
|
11 | |||
12 | from .i18n import _ |
|
12 | from .i18n import _ | |
13 | from .node import ( |
|
13 | from .node import ( | |
14 | nullid, |
|
14 | nullid, | |
15 | short, |
|
15 | short, | |
16 | ) |
|
16 | ) | |
17 | from .utils import ( |
|
17 | from .utils import ( | |
18 | stringutil, |
|
18 | stringutil, | |
19 | ) |
|
19 | ) | |
20 |
|
20 | |||
21 | from . import ( |
|
21 | from . import ( | |
22 | error, |
|
22 | error, | |
23 | pycompat, |
|
23 | pycompat, | |
24 | revlog, |
|
24 | revlog, | |
25 | util, |
|
25 | util, | |
26 | ) |
|
26 | ) | |
27 |
|
27 | |||
28 | VERIFY_DEFAULT = 0 |
|
28 | VERIFY_DEFAULT = 0 | |
29 | VERIFY_FULL = 1 |
|
29 | VERIFY_FULL = 1 | |
30 |
|
30 | |||
31 |
|
31 | |||
32 | def verify(repo, level=None): |
|
32 | def verify(repo, level=None): | |
33 | with repo.lock(): |
|
33 | with repo.lock(): | |
34 | v = verifier(repo, level) |
|
34 | v = verifier(repo, level) | |
35 | return v.verify() |
|
35 | return v.verify() | |
36 |
|
36 | |||
37 |
|
37 | |||
38 | def _normpath(f): |
|
38 | def _normpath(f): | |
39 | # under hg < 2.4, convert didn't sanitize paths properly, so a |
|
39 | # under hg < 2.4, convert didn't sanitize paths properly, so a | |
40 | # converted repo may contain repeated slashes |
|
40 | # converted repo may contain repeated slashes | |
41 | while b'//' in f: |
|
41 | while b'//' in f: | |
42 | f = f.replace(b'//', b'/') |
|
42 | f = f.replace(b'//', b'/') | |
43 | return f |
|
43 | return f | |
44 |
|
44 | |||
45 |
|
45 | |||
46 | class verifier(object): |
|
46 | class verifier(object): | |
47 | def __init__(self, repo, level=None): |
|
47 | def __init__(self, repo, level=None): | |
48 | self.repo = repo.unfiltered() |
|
48 | self.repo = repo.unfiltered() | |
49 | self.ui = repo.ui |
|
49 | self.ui = repo.ui | |
50 | self.match = repo.narrowmatch() |
|
50 | self.match = repo.narrowmatch() | |
51 | if level is None: |
|
51 | if level is None: | |
52 | level = VERIFY_DEFAULT |
|
52 | level = VERIFY_DEFAULT | |
53 | self._level = level |
|
53 | self._level = level | |
54 | self.badrevs = set() |
|
54 | self.badrevs = set() | |
55 | self.errors = 0 |
|
55 | self.errors = 0 | |
56 | self.warnings = 0 |
|
56 | self.warnings = 0 | |
57 | self.havecl = len(repo.changelog) > 0 |
|
57 | self.havecl = len(repo.changelog) > 0 | |
58 | self.havemf = len(repo.manifestlog.getstorage(b'')) > 0 |
|
58 | self.havemf = len(repo.manifestlog.getstorage(b'')) > 0 | |
59 | self.revlogv1 = repo.changelog.version != revlog.REVLOGV0 |
|
59 | self.revlogv1 = repo.changelog.version != revlog.REVLOGV0 | |
60 | self.lrugetctx = util.lrucachefunc(repo.unfiltered().__getitem__) |
|
60 | self.lrugetctx = util.lrucachefunc(repo.unfiltered().__getitem__) | |
61 | self.refersmf = False |
|
61 | self.refersmf = False | |
62 | self.fncachewarned = False |
|
62 | self.fncachewarned = False | |
63 | # developer config: verify.skipflags |
|
63 | # developer config: verify.skipflags | |
64 | self.skipflags = repo.ui.configint(b'verify', b'skipflags') |
|
64 | self.skipflags = repo.ui.configint(b'verify', b'skipflags') | |
65 | self.warnorphanstorefiles = True |
|
65 | self.warnorphanstorefiles = True | |
66 |
|
66 | |||
67 | def _warn(self, msg): |
|
67 | def _warn(self, msg): | |
68 | """record a "warning" level issue""" |
|
68 | """record a "warning" level issue""" | |
69 | self.ui.warn(msg + b"\n") |
|
69 | self.ui.warn(msg + b"\n") | |
70 | self.warnings += 1 |
|
70 | self.warnings += 1 | |
71 |
|
71 | |||
72 | def _err(self, linkrev, msg, filename=None): |
|
72 | def _err(self, linkrev, msg, filename=None): | |
73 | """record a "error" level issue""" |
|
73 | """record a "error" level issue""" | |
74 | if linkrev is not None: |
|
74 | if linkrev is not None: | |
75 | self.badrevs.add(linkrev) |
|
75 | self.badrevs.add(linkrev) | |
76 | linkrev = b"%d" % linkrev |
|
76 | linkrev = b"%d" % linkrev | |
77 | else: |
|
77 | else: | |
78 | linkrev = b'?' |
|
78 | linkrev = b'?' | |
79 | msg = b"%s: %s" % (linkrev, msg) |
|
79 | msg = b"%s: %s" % (linkrev, msg) | |
80 | if filename: |
|
80 | if filename: | |
81 | msg = b"%s@%s" % (filename, msg) |
|
81 | msg = b"%s@%s" % (filename, msg) | |
82 | self.ui.warn(b" " + msg + b"\n") |
|
82 | self.ui.warn(b" " + msg + b"\n") | |
83 | self.errors += 1 |
|
83 | self.errors += 1 | |
84 |
|
84 | |||
85 | def _exc(self, linkrev, msg, inst, filename=None): |
|
85 | def _exc(self, linkrev, msg, inst, filename=None): | |
86 | """record exception raised during the verify process""" |
|
86 | """record exception raised during the verify process""" | |
87 | fmsg = stringutil.forcebytestr(inst) |
|
87 | fmsg = stringutil.forcebytestr(inst) | |
88 | if not fmsg: |
|
88 | if not fmsg: | |
89 | fmsg = pycompat.byterepr(inst) |
|
89 | fmsg = pycompat.byterepr(inst) | |
90 | self._err(linkrev, b"%s: %s" % (msg, fmsg), filename) |
|
90 | self._err(linkrev, b"%s: %s" % (msg, fmsg), filename) | |
91 |
|
91 | |||
92 | def _checkrevlog(self, obj, name, linkrev): |
|
92 | def _checkrevlog(self, obj, name, linkrev): | |
93 | """verify high level property of a revlog |
|
93 | """verify high level property of a revlog | |
94 |
|
94 | |||
95 | - revlog is present, |
|
95 | - revlog is present, | |
96 | - revlog is non-empty, |
|
96 | - revlog is non-empty, | |
97 | - sizes (index and data) are correct, |
|
97 | - sizes (index and data) are correct, | |
98 | - revlog's format version is correct. |
|
98 | - revlog's format version is correct. | |
99 | """ |
|
99 | """ | |
100 | if not len(obj) and (self.havecl or self.havemf): |
|
100 | if not len(obj) and (self.havecl or self.havemf): | |
101 | self._err(linkrev, _(b"empty or missing %s") % name) |
|
101 | self._err(linkrev, _(b"empty or missing %s") % name) | |
102 | return |
|
102 | return | |
103 |
|
103 | |||
104 | d = obj.checksize() |
|
104 | d = obj.checksize() | |
105 | if d[0]: |
|
105 | if d[0]: | |
106 | self._err(None, _(b"data length off by %d bytes") % d[0], name) |
|
106 | self._err(None, _(b"data length off by %d bytes") % d[0], name) | |
107 | if d[1]: |
|
107 | if d[1]: | |
108 | self._err(None, _(b"index contains %d extra bytes") % d[1], name) |
|
108 | self._err(None, _(b"index contains %d extra bytes") % d[1], name) | |
109 |
|
109 | |||
110 | if obj.version != revlog.REVLOGV0: |
|
110 | if obj.version != revlog.REVLOGV0: | |
111 | if not self.revlogv1: |
|
111 | if not self.revlogv1: | |
112 | self._warn(_(b"warning: `%s' uses revlog format 1") % name) |
|
112 | self._warn(_(b"warning: `%s' uses revlog format 1") % name) | |
113 | elif self.revlogv1: |
|
113 | elif self.revlogv1: | |
114 | self._warn(_(b"warning: `%s' uses revlog format 0") % name) |
|
114 | self._warn(_(b"warning: `%s' uses revlog format 0") % name) | |
115 |
|
115 | |||
116 | def _checkentry(self, obj, i, node, seen, linkrevs, f): |
|
116 | def _checkentry(self, obj, i, node, seen, linkrevs, f): | |
117 | """verify a single revlog entry |
|
117 | """verify a single revlog entry | |
118 |
|
118 | |||
119 | arguments are: |
|
119 | arguments are: | |
120 | - obj: the source revlog |
|
120 | - obj: the source revlog | |
121 | - i: the revision number |
|
121 | - i: the revision number | |
122 | - node: the revision node id |
|
122 | - node: the revision node id | |
123 | - seen: nodes previously seen for this revlog |
|
123 | - seen: nodes previously seen for this revlog | |
124 | - linkrevs: [changelog-revisions] introducing "node" |
|
124 | - linkrevs: [changelog-revisions] introducing "node" | |
125 | - f: string label ("changelog", "manifest", or filename) |
|
125 | - f: string label ("changelog", "manifest", or filename) | |
126 |
|
126 | |||
127 | Performs the following checks: |
|
127 | Performs the following checks: | |
128 | - linkrev points to an existing changelog revision, |
|
128 | - linkrev points to an existing changelog revision, | |
129 | - linkrev points to a changelog revision that introduces this revision, |
|
129 | - linkrev points to a changelog revision that introduces this revision, | |
130 | - linkrev points to the lowest of these changesets, |
|
130 | - linkrev points to the lowest of these changesets, | |
131 | - both parents exist in the revlog, |
|
131 | - both parents exist in the revlog, | |
132 | - the revision is not duplicated. |
|
132 | - the revision is not duplicated. | |
133 |
|
133 | |||
134 | Return the linkrev of the revision (or None for changelog's revisions). |
|
134 | Return the linkrev of the revision (or None for changelog's revisions). | |
135 | """ |
|
135 | """ | |
136 | lr = obj.linkrev(obj.rev(node)) |
|
136 | lr = obj.linkrev(obj.rev(node)) | |
137 | if lr < 0 or (self.havecl and lr not in linkrevs): |
|
137 | if lr < 0 or (self.havecl and lr not in linkrevs): | |
138 | if lr < 0 or lr >= len(self.repo.changelog): |
|
138 | if lr < 0 or lr >= len(self.repo.changelog): | |
139 | msg = _(b"rev %d points to nonexistent changeset %d") |
|
139 | msg = _(b"rev %d points to nonexistent changeset %d") | |
140 | else: |
|
140 | else: | |
141 | msg = _(b"rev %d points to unexpected changeset %d") |
|
141 | msg = _(b"rev %d points to unexpected changeset %d") | |
142 | self._err(None, msg % (i, lr), f) |
|
142 | self._err(None, msg % (i, lr), f) | |
143 | if linkrevs: |
|
143 | if linkrevs: | |
144 | if f and len(linkrevs) > 1: |
|
144 | if f and len(linkrevs) > 1: | |
145 | try: |
|
145 | try: | |
146 | # attempt to filter down to real linkrevs |
|
146 | # attempt to filter down to real linkrevs | |
147 | linkrevs = [ |
|
147 | linkrevs = [ | |
148 | l |
|
148 | l | |
149 | for l in linkrevs |
|
149 | for l in linkrevs | |
150 | if self.lrugetctx(l)[f].filenode() == node |
|
150 | if self.lrugetctx(l)[f].filenode() == node | |
151 | ] |
|
151 | ] | |
152 | except Exception: |
|
152 | except Exception: | |
153 | pass |
|
153 | pass | |
154 | self._warn( |
|
154 | self._warn( | |
155 | _(b" (expected %s)") |
|
155 | _(b" (expected %s)") | |
156 | % b" ".join(map(pycompat.bytestr, linkrevs)) |
|
156 | % b" ".join(map(pycompat.bytestr, linkrevs)) | |
157 | ) |
|
157 | ) | |
158 | lr = None # can't be trusted |
|
158 | lr = None # can't be trusted | |
159 |
|
159 | |||
160 | try: |
|
160 | try: | |
161 | p1, p2 = obj.parents(node) |
|
161 | p1, p2 = obj.parents(node) | |
162 | if p1 not in seen and p1 != nullid: |
|
162 | if p1 not in seen and p1 != nullid: | |
163 | self._err( |
|
163 | self._err( | |
164 | lr, |
|
164 | lr, | |
165 | _(b"unknown parent 1 %s of %s") % (short(p1), short(node)), |
|
165 | _(b"unknown parent 1 %s of %s") % (short(p1), short(node)), | |
166 | f, |
|
166 | f, | |
167 | ) |
|
167 | ) | |
168 | if p2 not in seen and p2 != nullid: |
|
168 | if p2 not in seen and p2 != nullid: | |
169 | self._err( |
|
169 | self._err( | |
170 | lr, |
|
170 | lr, | |
171 | _(b"unknown parent 2 %s of %s") % (short(p2), short(node)), |
|
171 | _(b"unknown parent 2 %s of %s") % (short(p2), short(node)), | |
172 | f, |
|
172 | f, | |
173 | ) |
|
173 | ) | |
174 | except Exception as inst: |
|
174 | except Exception as inst: | |
175 | self._exc(lr, _(b"checking parents of %s") % short(node), inst, f) |
|
175 | self._exc(lr, _(b"checking parents of %s") % short(node), inst, f) | |
176 |
|
176 | |||
177 | if node in seen: |
|
177 | if node in seen: | |
178 | self._err(lr, _(b"duplicate revision %d (%d)") % (i, seen[node]), f) |
|
178 | self._err(lr, _(b"duplicate revision %d (%d)") % (i, seen[node]), f) | |
179 | seen[node] = i |
|
179 | seen[node] = i | |
180 | return lr |
|
180 | return lr | |
181 |
|
181 | |||
182 | def verify(self): |
|
182 | def verify(self): | |
183 | """verify the content of the Mercurial repository |
|
183 | """verify the content of the Mercurial repository | |
184 |
|
184 | |||
185 | This method runs all verifications, displaying issues as they are found. 

185 | This method runs all verifications, displaying issues as they are found. | 
186 |
|
186 | |||
187 | return 1 if any errors have been encountered, 0 otherwise.""" 

187 | return 1 if any errors have been encountered, 0 otherwise.""" | 
188 | # initial validation and generic report |
|
188 | # initial validation and generic report | |
189 | repo = self.repo |
|
189 | repo = self.repo | |
190 | ui = repo.ui |
|
190 | ui = repo.ui | |
191 | if not repo.url().startswith(b'file:'): |
|
191 | if not repo.url().startswith(b'file:'): | |
192 | raise error.Abort(_(b"cannot verify bundle or remote repos")) |
|
192 | raise error.Abort(_(b"cannot verify bundle or remote repos")) | |
193 |
|
193 | |||
194 | if os.path.exists(repo.sjoin(b"journal")): |
|
194 | if os.path.exists(repo.sjoin(b"journal")): | |
195 | ui.warn(_(b"abandoned transaction found - run hg recover\n")) |
|
195 | ui.warn(_(b"abandoned transaction found - run hg recover\n")) | |
196 |
|
196 | |||
197 | if ui.verbose or not self.revlogv1: |
|
197 | if ui.verbose or not self.revlogv1: | |
198 | ui.status( |
|
198 | ui.status( | |
199 | _(b"repository uses revlog format %d\n") |
|
199 | _(b"repository uses revlog format %d\n") | |
200 | % (self.revlogv1 and 1 or 0) |
|
200 | % (self.revlogv1 and 1 or 0) | |
201 | ) |
|
201 | ) | |
202 |
|
202 | |||
203 | # data verification |
|
203 | # data verification | |
204 | mflinkrevs, filelinkrevs = self._verifychangelog() |
|
204 | mflinkrevs, filelinkrevs = self._verifychangelog() | |
205 | filenodes = self._verifymanifest(mflinkrevs) |
|
205 | filenodes = self._verifymanifest(mflinkrevs) | |
206 | del mflinkrevs |
|
206 | del mflinkrevs | |
207 | self._crosscheckfiles(filelinkrevs, filenodes) |
|
207 | self._crosscheckfiles(filelinkrevs, filenodes) | |
208 | totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs) |
|
208 | totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs) | |
209 |
|
209 | |||
210 | # final report |
|
210 | # final report | |
211 | ui.status( |
|
211 | ui.status( | |
212 | _(b"checked %d changesets with %d changes to %d files\n") |
|
212 | _(b"checked %d changesets with %d changes to %d files\n") | |
213 | % (len(repo.changelog), filerevisions, totalfiles) |
|
213 | % (len(repo.changelog), filerevisions, totalfiles) | |
214 | ) |
|
214 | ) | |
215 | if self.warnings: |
|
215 | if self.warnings: | |
216 | ui.warn(_(b"%d warnings encountered!\n") % self.warnings) |
|
216 | ui.warn(_(b"%d warnings encountered!\n") % self.warnings) | |
217 | if self.fncachewarned: |
|
217 | if self.fncachewarned: | |
218 | ui.warn( |
|
218 | ui.warn( | |
219 | _( |
|
219 | _( | |
220 | b'hint: run "hg debugrebuildfncache" to recover from ' |
|
220 | b'hint: run "hg debugrebuildfncache" to recover from ' | |
221 | b'corrupt fncache\n' |
|
221 | b'corrupt fncache\n' | |
222 | ) |
|
222 | ) | |
223 | ) |
|
223 | ) | |
224 | if self.errors: |
|
224 | if self.errors: | |
225 | ui.warn(_(b"%d integrity errors encountered!\n") % self.errors) |
|
225 | ui.warn(_(b"%d integrity errors encountered!\n") % self.errors) | |
226 | if self.badrevs: |
|
226 | if self.badrevs: | |
227 | ui.warn( |
|
227 | ui.warn( | |
228 | _(b"(first damaged changeset appears to be %d)\n") |
|
228 | _(b"(first damaged changeset appears to be %d)\n") | |
229 | % min(self.badrevs) |
|
229 | % min(self.badrevs) | |
230 | ) |
|
230 | ) | |
231 | return 1 |
|
231 | return 1 | |
232 | return 0 |
|
232 | return 0 | |
233 |
|
233 | |||
234 | def _verifychangelog(self): |
|
234 | def _verifychangelog(self): | |
235 | """verify the changelog of a repository |
|
235 | """verify the changelog of a repository | |
236 |
|
236 | |||
237 | The following checks are performed: |
|
237 | The following checks are performed: | |
238 | - all of `_checkrevlog` checks, |
|
238 | - all of `_checkrevlog` checks, | |
239 | - all of `_checkentry` checks (for each revision), 

239 | - all of `_checkentry` checks (for each revision), | 
240 | - each revision can be read. |
|
240 | - each revision can be read. | |
241 |
|
241 | |||
242 | The function returns some of the data observed in the changesets as a |
|
242 | The function returns some of the data observed in the changesets as a | |
243 | (mflinkrevs, filelinkrevs) tuple: 

243 | (mflinkrevs, filelinkrevs) tuple: | 
244 | - mflinkrevs: is a { manifest-node -> [changelog-rev] } mapping |
|
244 | - mflinkrevs: is a { manifest-node -> [changelog-rev] } mapping | |
245 | - filelinkrevs: is a { file-path -> [changelog-rev] } mapping |
|
245 | - filelinkrevs: is a { file-path -> [changelog-rev] } mapping | |
246 |
|
246 | |||
247 | If a matcher was specified, filelinkrevs will only contain matched 

247 | If a matcher was specified, filelinkrevs will only contain matched | 
248 | files. |
|
248 | files. | |
249 | """ |
|
249 | """ | |
250 | ui = self.ui |
|
250 | ui = self.ui | |
251 | repo = self.repo |
|
251 | repo = self.repo | |
252 | match = self.match |
|
252 | match = self.match | |
253 | cl = repo.changelog |
|
253 | cl = repo.changelog | |
254 |
|
254 | |||
255 | ui.status(_(b"checking changesets\n")) |
|
255 | ui.status(_(b"checking changesets\n")) | |
256 | mflinkrevs = {} |
|
256 | mflinkrevs = {} | |
257 | filelinkrevs = {} |
|
257 | filelinkrevs = {} | |
258 | seen = {} |
|
258 | seen = {} | |
259 | self._checkrevlog(cl, b"changelog", 0) |
|
259 | self._checkrevlog(cl, b"changelog", 0) | |
260 | progress = ui.makeprogress( |
|
260 | progress = ui.makeprogress( | |
261 | _(b'checking'), unit=_(b'changesets'), total=len(repo) |
|
261 | _(b'checking'), unit=_(b'changesets'), total=len(repo) | |
262 | ) |
|
262 | ) | |
263 | for i in repo: |
|
263 | for i in repo: | |
264 | progress.update(i) |
|
264 | progress.update(i) | |
265 | n = cl.node(i) |
|
265 | n = cl.node(i) | |
266 | self._checkentry(cl, i, n, seen, [i], b"changelog") |
|
266 | self._checkentry(cl, i, n, seen, [i], b"changelog") | |
267 |
|
267 | |||
268 | try: |
|
268 | try: | |
269 | changes = cl.read(n) |
|
269 | changes = cl.read(n) | |
270 | if changes[0] != nullid: |
|
270 | if changes[0] != nullid: | |
271 | mflinkrevs.setdefault(changes[0], []).append(i) |
|
271 | mflinkrevs.setdefault(changes[0], []).append(i) | |
272 | self.refersmf = True |
|
272 | self.refersmf = True | |
273 | for f in changes[3]: |
|
273 | for f in changes[3]: | |
274 | if match(f): |
|
274 | if match(f): | |
275 | filelinkrevs.setdefault(_normpath(f), []).append(i) |
|
275 | filelinkrevs.setdefault(_normpath(f), []).append(i) | |
276 | except Exception as inst: |
|
276 | except Exception as inst: | |
277 | self.refersmf = True |
|
277 | self.refersmf = True | |
278 | self._exc(i, _(b"unpacking changeset %s") % short(n), inst) |
|
278 | self._exc(i, _(b"unpacking changeset %s") % short(n), inst) | |
279 | progress.complete() |
|
279 | progress.complete() | |
280 | return mflinkrevs, filelinkrevs |
|
280 | return mflinkrevs, filelinkrevs | |
281 |
|
281 | |||
282 | def _verifymanifest( |
|
282 | def _verifymanifest( | |
283 | self, mflinkrevs, dir=b"", storefiles=None, subdirprogress=None |
|
283 | self, mflinkrevs, dir=b"", storefiles=None, subdirprogress=None | |
284 | ): |
|
284 | ): | |
285 | """verify the manifestlog content |
|
285 | """verify the manifestlog content | |
286 |
|
286 | |||
287 | Inputs: |
|
287 | Inputs: | |
288 | - mflinkrevs: a {manifest-node -> [changelog-revisions]} mapping |
|
288 | - mflinkrevs: a {manifest-node -> [changelog-revisions]} mapping | |
289 | - dir: a subdirectory to check (for tree manifest repo) |
|
289 | - dir: a subdirectory to check (for tree manifest repo) | |
290 | - storefiles: set of currently "orphan" files. |
|
290 | - storefiles: set of currently "orphan" files. | |
291 | - subdirprogress: a progress object |
|
291 | - subdirprogress: a progress object | |
292 |
|
292 | |||
293 | This function checks: |
|
293 | This function checks: | |
294 | * all of `_checkrevlog` checks (for all manifest related revlogs) |
|
294 | * all of `_checkrevlog` checks (for all manifest related revlogs) | |
295 | * all of `_checkentry` checks (for all manifest related revisions) |
|
295 | * all of `_checkentry` checks (for all manifest related revisions) | |
296 | * nodes for subdirectories exist in the sub-directory manifest |
|
296 | * nodes for subdirectories exist in the sub-directory manifest | |
297 | * each manifest entry has a file path |
|
297 | * each manifest entry has a file path | |
298 | * each manifest node referred to in mflinkrevs exists in the manifest log |
|
298 | * each manifest node referred to in mflinkrevs exists in the manifest log | |
299 |
|
299 | |||
300 | If tree manifest is in use and a matcher is specified, only the |
|
300 | If tree manifest is in use and a matcher is specified, only the | |
301 | sub-directories matching it will be verified. |
|
301 | sub-directories matching it will be verified. | |
302 |
|
302 | |||
303 | return a two level mapping: |
|
303 | return a two level mapping: | |
304 | {"path" -> { filenode -> changelog-revision}} |
|
304 | {"path" -> { filenode -> changelog-revision}} | |
305 |
|
305 | |||
306 | This mapping primarily contains entries for every file in the |
|
306 | This mapping primarily contains entries for every file in the | |
307 | repository. In addition, when tree-manifest is used, it also contains |
|
307 | repository. In addition, when tree-manifest is used, it also contains | |
308 | sub-directory entries. |
|
308 | sub-directory entries. | |
309 |
|
309 | |||
310 | If a matcher is provided, only matching paths will be included. |
|
310 | If a matcher is provided, only matching paths will be included. | |
311 | """ |
|
311 | """ | |
312 | repo = self.repo |
|
312 | repo = self.repo | |
313 | ui = self.ui |
|
313 | ui = self.ui | |
314 | match = self.match |
|
314 | match = self.match | |
315 | mfl = self.repo.manifestlog |
|
315 | mfl = self.repo.manifestlog | |
316 | mf = mfl.getstorage(dir) |
|
316 | mf = mfl.getstorage(dir) | |
317 |
|
317 | |||
318 | if not dir: |
|
318 | if not dir: | |
319 | self.ui.status(_(b"checking manifests\n")) |
|
319 | self.ui.status(_(b"checking manifests\n")) | |
320 |
|
320 | |||
321 | filenodes = {} |
|
321 | filenodes = {} | |
322 | subdirnodes = {} |
|
322 | subdirnodes = {} | |
323 | seen = {} |
|
323 | seen = {} | |
324 | label = b"manifest" |
|
324 | label = b"manifest" | |
325 | if dir: |
|
325 | if dir: | |
326 | label = dir |
|
326 | label = dir | |
327 | revlogfiles = mf.files() |
|
327 | revlogfiles = mf.files() | |
328 | storefiles.difference_update(revlogfiles) |
|
328 | storefiles.difference_update(revlogfiles) | |
329 | if subdirprogress: # should be true since we're in a subdirectory |
|
329 | if subdirprogress: # should be true since we're in a subdirectory | |
330 | subdirprogress.increment() |
|
330 | subdirprogress.increment() | |
331 | if self.refersmf: |
|
331 | if self.refersmf: | |
332 | # Do not check manifest if there are only changelog entries with |
|
332 | # Do not check manifest if there are only changelog entries with | |
333 | # null manifests. |
|
333 | # null manifests. | |
334 | self._checkrevlog(mf, label, 0) |
|
334 | self._checkrevlog(mf, label, 0) | |
335 | progress = ui.makeprogress( |
|
335 | progress = ui.makeprogress( | |
336 | _(b'checking'), unit=_(b'manifests'), total=len(mf) |
|
336 | _(b'checking'), unit=_(b'manifests'), total=len(mf) | |
337 | ) |
|
337 | ) | |
338 | for i in mf: |
|
338 | for i in mf: | |
339 | if not dir: |
|
339 | if not dir: | |
340 | progress.update(i) |
|
340 | progress.update(i) | |
341 | n = mf.node(i) |
|
341 | n = mf.node(i) | |
342 | lr = self._checkentry(mf, i, n, seen, mflinkrevs.get(n, []), label) |
|
342 | lr = self._checkentry(mf, i, n, seen, mflinkrevs.get(n, []), label) | |
343 | if n in mflinkrevs: |
|
343 | if n in mflinkrevs: | |
344 | del mflinkrevs[n] |
|
344 | del mflinkrevs[n] | |
345 | elif dir: |
|
345 | elif dir: | |
346 | self._err( |
|
346 | self._err( | |
347 | lr, |
|
347 | lr, | |
348 | _(b"%s not in parent-directory manifest") % short(n), |
|
348 | _(b"%s not in parent-directory manifest") % short(n), | |
349 | label, |
|
349 | label, | |
350 | ) |
|
350 | ) | |
351 | else: |
|
351 | else: | |
352 | self._err(lr, _(b"%s not in changesets") % short(n), label) |
|
352 | self._err(lr, _(b"%s not in changesets") % short(n), label) | |
353 |
|
353 | |||
354 | try: |
|
354 | try: | |
355 | mfdelta = mfl.get(dir, n).readdelta(shallow=True) |
|
355 | mfdelta = mfl.get(dir, n).readdelta(shallow=True) | |
356 | for f, fn, fl in mfdelta.iterentries(): |
|
356 | for f, fn, fl in mfdelta.iterentries(): | |
357 | if not f: |
|
357 | if not f: | |
358 | self._err(lr, _(b"entry without name in manifest")) |
|
358 | self._err(lr, _(b"entry without name in manifest")) | |
359 | elif f == b"/dev/null": # ignore this in very old repos |
|
359 | elif f == b"/dev/null": # ignore this in very old repos | |
360 | continue |
|
360 | continue | |
361 | fullpath = dir + _normpath(f) |
|
361 | fullpath = dir + _normpath(f) | |
362 | if fl == b't': |
|
362 | if fl == b't': | |
363 | if not match.visitdir(fullpath): |
|
363 | if not match.visitdir(fullpath): | |
364 | continue |
|
364 | continue | |
365 | subdirnodes.setdefault(fullpath + b'/', {}).setdefault( |
|
365 | subdirnodes.setdefault(fullpath + b'/', {}).setdefault( | |
366 | fn, [] |
|
366 | fn, [] | |
367 | ).append(lr) |
|
367 | ).append(lr) | |
368 | else: |
|
368 | else: | |
369 | if not match(fullpath): |
|
369 | if not match(fullpath): | |
370 | continue |
|
370 | continue | |
371 | filenodes.setdefault(fullpath, {}).setdefault(fn, lr) |
|
371 | filenodes.setdefault(fullpath, {}).setdefault(fn, lr) | |
372 | except Exception as inst: |
|
372 | except Exception as inst: | |
373 | self._exc(lr, _(b"reading delta %s") % short(n), inst, label) |
|
373 | self._exc(lr, _(b"reading delta %s") % short(n), inst, label) | |
374 | if self._level >= VERIFY_FULL: |
|
374 | if self._level >= VERIFY_FULL: | |
375 | try: |
|
375 | try: | |
376 | # Various issues can affect manifests. So we read each full |
|
376 | # Various issues can affect manifests. So we read each full | |
377 | # text from storage. This triggers the checks from the core |
|
377 | # text from storage. This triggers the checks from the core | |
378 | # code (e.g. hash verification, filenames are ordered, etc.) |
|
378 | # code (e.g. hash verification, filenames are ordered, etc.) | |
379 | mfdelta = mfl.get(dir, n).read() |
|
379 | mfdelta = mfl.get(dir, n).read() | |
380 | except Exception as inst: |
|
380 | except Exception as inst: | |
381 | self._exc( |
|
381 | self._exc( | |
382 | lr, |
|
382 | lr, | |
383 | _(b"reading full manifest %s") % short(n), |
|
383 | _(b"reading full manifest %s") % short(n), | |
384 | inst, |
|
384 | inst, | |
385 | label, |
|
385 | label, | |
386 | ) |
|
386 | ) | |
387 |
|
387 | |||
388 | if not dir: |
|
388 | if not dir: | |
389 | progress.complete() |
|
389 | progress.complete() | |
390 |
|
390 | |||
391 | if self.havemf: |
|
391 | if self.havemf: | |
392 | # since we delete entries in `mflinkrevs` during iteration, any |
|
392 | # since we delete entries in `mflinkrevs` during iteration, any | |
393 | # remaining entries are "missing". We need to issue errors for them. |
|
393 | # remaining entries are "missing". We need to issue errors for them. | |
394 | changesetpairs = [(c, m) for m in mflinkrevs for c in mflinkrevs[m]] |
|
394 | changesetpairs = [(c, m) for m in mflinkrevs for c in mflinkrevs[m]] | |
395 | for c, m in sorted(changesetpairs): |
|
395 | for c, m in sorted(changesetpairs): | |
396 | if dir: |
|
396 | if dir: | |
397 | self._err( |
|
397 | self._err( | |
398 | c, |
|
398 | c, | |
399 | _( |
|
399 | _( | |
400 | b"parent-directory manifest refers to unknown" |
|
400 | b"parent-directory manifest refers to unknown" | |
401 | b" revision %s" |
|
401 | b" revision %s" | |
402 | ) |
|
402 | ) | |
403 | % short(m), |
|
403 | % short(m), | |
404 | label, |
|
404 | label, | |
405 | ) |
|
405 | ) | |
406 | else: |
|
406 | else: | |
407 | self._err( |
|
407 | self._err( | |
408 | c, |
|
408 | c, | |
409 | _(b"changeset refers to unknown revision %s") |
|
409 | _(b"changeset refers to unknown revision %s") | |
410 | % short(m), |
|
410 | % short(m), | |
411 | label, |
|
411 | label, | |
412 | ) |
|
412 | ) | |
413 |
|
413 | |||
414 | if not dir and subdirnodes: |
|
414 | if not dir and subdirnodes: | |
415 | self.ui.status(_(b"checking directory manifests\n")) |
|
415 | self.ui.status(_(b"checking directory manifests\n")) | |
416 | storefiles = set() |
|
416 | storefiles = set() | |
417 | subdirs = set() |
|
417 | subdirs = set() | |
418 | revlogv1 = self.revlogv1 |
|
418 | revlogv1 = self.revlogv1 | |
419 | for f, f2, size in repo.store.datafiles(): |
|
419 | for t, f, f2, size in repo.store.datafiles(): | |
420 | if not f: |
|
420 | if not f: | |
421 | self._err(None, _(b"cannot decode filename '%s'") % f2) |
|
421 | self._err(None, _(b"cannot decode filename '%s'") % f2) | |
422 | elif (size > 0 or not revlogv1) and f.startswith(b'meta/'): |
|
422 | elif (size > 0 or not revlogv1) and f.startswith(b'meta/'): | |
423 | storefiles.add(_normpath(f)) |
|
423 | storefiles.add(_normpath(f)) | |
424 | subdirs.add(os.path.dirname(f)) |
|
424 | subdirs.add(os.path.dirname(f)) | |
425 | subdirprogress = ui.makeprogress( |
|
425 | subdirprogress = ui.makeprogress( | |
426 | _(b'checking'), unit=_(b'manifests'), total=len(subdirs) |
|
426 | _(b'checking'), unit=_(b'manifests'), total=len(subdirs) | |
427 | ) |
|
427 | ) | |
428 |
|
428 | |||
429 | for subdir, linkrevs in pycompat.iteritems(subdirnodes): |
|
429 | for subdir, linkrevs in pycompat.iteritems(subdirnodes): | |
430 | subdirfilenodes = self._verifymanifest( |
|
430 | subdirfilenodes = self._verifymanifest( | |
431 | linkrevs, subdir, storefiles, subdirprogress |
|
431 | linkrevs, subdir, storefiles, subdirprogress | |
432 | ) |
|
432 | ) | |
433 | for f, onefilenodes in pycompat.iteritems(subdirfilenodes): |
|
433 | for f, onefilenodes in pycompat.iteritems(subdirfilenodes): | |
434 | filenodes.setdefault(f, {}).update(onefilenodes) |
|
434 | filenodes.setdefault(f, {}).update(onefilenodes) | |
435 |
|
435 | |||
436 | if not dir and subdirnodes: |
|
436 | if not dir and subdirnodes: | |
437 | assert subdirprogress is not None # help pytype |
|
437 | assert subdirprogress is not None # help pytype | |
438 | subdirprogress.complete() |
|
438 | subdirprogress.complete() | |
439 | if self.warnorphanstorefiles: |
|
439 | if self.warnorphanstorefiles: | |
440 | for f in sorted(storefiles): |
|
440 | for f in sorted(storefiles): | |
441 | self._warn(_(b"warning: orphan data file '%s'") % f) |
|
441 | self._warn(_(b"warning: orphan data file '%s'") % f) | |
442 |
|
442 | |||
443 | return filenodes |
|
443 | return filenodes | |
444 |
|
444 | |||
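The nested mapping returned by _verifymanifest() has the two-level shape described in its docstring. A small, purely illustrative example with hypothetical paths, nodes and revisions:

    # {"path" -> {filenode -> changelog revision}}; with tree manifests,
    # sub-directory entries can appear alongside plain file paths.
    filenodes = {
        b'foo/bar.txt': {b'\xcc' * 20: 0, b'\xdd' * 20: 2},
        b'README': {b'\xee' * 20: 1},
    }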
445 | def _crosscheckfiles(self, filelinkrevs, filenodes): |
|
445 | def _crosscheckfiles(self, filelinkrevs, filenodes): | |
446 | repo = self.repo |
|
446 | repo = self.repo | |
447 | ui = self.ui |
|
447 | ui = self.ui | |
448 | ui.status(_(b"crosschecking files in changesets and manifests\n")) |
|
448 | ui.status(_(b"crosschecking files in changesets and manifests\n")) | |
449 |
|
449 | |||
450 | total = len(filelinkrevs) + len(filenodes) |
|
450 | total = len(filelinkrevs) + len(filenodes) | |
451 | progress = ui.makeprogress( |
|
451 | progress = ui.makeprogress( | |
452 | _(b'crosschecking'), unit=_(b'files'), total=total |
|
452 | _(b'crosschecking'), unit=_(b'files'), total=total | |
453 | ) |
|
453 | ) | |
454 | if self.havemf: |
|
454 | if self.havemf: | |
455 | for f in sorted(filelinkrevs): |
|
455 | for f in sorted(filelinkrevs): | |
456 | progress.increment() |
|
456 | progress.increment() | |
457 | if f not in filenodes: |
|
457 | if f not in filenodes: | |
458 | lr = filelinkrevs[f][0] |
|
458 | lr = filelinkrevs[f][0] | |
459 | self._err(lr, _(b"in changeset but not in manifest"), f) |
|
459 | self._err(lr, _(b"in changeset but not in manifest"), f) | |
460 |
|
460 | |||
461 | if self.havecl: |
|
461 | if self.havecl: | |
462 | for f in sorted(filenodes): |
|
462 | for f in sorted(filenodes): | |
463 | progress.increment() |
|
463 | progress.increment() | |
464 | if f not in filelinkrevs: |
|
464 | if f not in filelinkrevs: | |
465 | try: |
|
465 | try: | |
466 | fl = repo.file(f) |
|
466 | fl = repo.file(f) | |
467 | lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]]) |
|
467 | lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]]) | |
468 | except Exception: |
|
468 | except Exception: | |
469 | lr = None |
|
469 | lr = None | |
470 | self._err(lr, _(b"in manifest but not in changeset"), f) |
|
470 | self._err(lr, _(b"in manifest but not in changeset"), f) | |
471 |
|
471 | |||
472 | progress.complete() |
|
472 | progress.complete() | |
473 |
|
473 | |||
474 | def _verifyfiles(self, filenodes, filelinkrevs): |
|
474 | def _verifyfiles(self, filenodes, filelinkrevs): | |
475 | repo = self.repo |
|
475 | repo = self.repo | |
476 | ui = self.ui |
|
476 | ui = self.ui | |
477 | lrugetctx = self.lrugetctx |
|
477 | lrugetctx = self.lrugetctx | |
478 | revlogv1 = self.revlogv1 |
|
478 | revlogv1 = self.revlogv1 | |
479 | havemf = self.havemf |
|
479 | havemf = self.havemf | |
480 | ui.status(_(b"checking files\n")) |
|
480 | ui.status(_(b"checking files\n")) | |
481 |
|
481 | |||
482 | storefiles = set() |
|
482 | storefiles = set() | |
483 | for f, f2, size in repo.store.datafiles(): |
|
483 | for rl_type, f, f2, size in repo.store.datafiles(): | |
484 | if not f: |
|
484 | if not f: | |
485 | self._err(None, _(b"cannot decode filename '%s'") % f2) |
|
485 | self._err(None, _(b"cannot decode filename '%s'") % f2) | |
486 | elif (size > 0 or not revlogv1) and f.startswith(b'data/'): |
|
486 | elif (size > 0 or not revlogv1) and f.startswith(b'data/'): | |
487 | storefiles.add(_normpath(f)) |
|
487 | storefiles.add(_normpath(f)) | |
488 |
|
488 | |||
489 | state = { |
|
489 | state = { | |
490 | # TODO this assumes revlog storage for changelog. |
|
490 | # TODO this assumes revlog storage for changelog. | |
491 | b'expectedversion': self.repo.changelog.version & 0xFFFF, |
|
491 | b'expectedversion': self.repo.changelog.version & 0xFFFF, | |
492 | b'skipflags': self.skipflags, |
|
492 | b'skipflags': self.skipflags, | |
493 | # experimental config: censor.policy |
|
493 | # experimental config: censor.policy | |
494 | b'erroroncensored': ui.config(b'censor', b'policy') == b'abort', |
|
494 | b'erroroncensored': ui.config(b'censor', b'policy') == b'abort', | |
495 | } |
|
495 | } | |
496 |
|
496 | |||
497 | files = sorted(set(filenodes) | set(filelinkrevs)) |
|
497 | files = sorted(set(filenodes) | set(filelinkrevs)) | |
498 | revisions = 0 |
|
498 | revisions = 0 | |
499 | progress = ui.makeprogress( |
|
499 | progress = ui.makeprogress( | |
500 | _(b'checking'), unit=_(b'files'), total=len(files) |
|
500 | _(b'checking'), unit=_(b'files'), total=len(files) | |
501 | ) |
|
501 | ) | |
502 | for i, f in enumerate(files): |
|
502 | for i, f in enumerate(files): | |
503 | progress.update(i, item=f) |
|
503 | progress.update(i, item=f) | |
504 | try: |
|
504 | try: | |
505 | linkrevs = filelinkrevs[f] |
|
505 | linkrevs = filelinkrevs[f] | |
506 | except KeyError: |
|
506 | except KeyError: | |
507 | # in manifest but not in changelog |
|
507 | # in manifest but not in changelog | |
508 | linkrevs = [] |
|
508 | linkrevs = [] | |
509 |
|
509 | |||
510 | if linkrevs: |
|
510 | if linkrevs: | |
511 | lr = linkrevs[0] |
|
511 | lr = linkrevs[0] | |
512 | else: |
|
512 | else: | |
513 | lr = None |
|
513 | lr = None | |
514 |
|
514 | |||
515 | try: |
|
515 | try: | |
516 | fl = repo.file(f) |
|
516 | fl = repo.file(f) | |
517 | except error.StorageError as e: |
|
517 | except error.StorageError as e: | |
518 | self._err(lr, _(b"broken revlog! (%s)") % e, f) |
|
518 | self._err(lr, _(b"broken revlog! (%s)") % e, f) | |
519 | continue |
|
519 | continue | |
520 |
|
520 | |||
521 | for ff in fl.files(): |
|
521 | for ff in fl.files(): | |
522 | try: |
|
522 | try: | |
523 | storefiles.remove(ff) |
|
523 | storefiles.remove(ff) | |
524 | except KeyError: |
|
524 | except KeyError: | |
525 | if self.warnorphanstorefiles: |
|
525 | if self.warnorphanstorefiles: | |
526 | self._warn( |
|
526 | self._warn( | |
527 | _(b" warning: revlog '%s' not in fncache!") % ff |
|
527 | _(b" warning: revlog '%s' not in fncache!") % ff | |
528 | ) |
|
528 | ) | |
529 | self.fncachewarned = True |
|
529 | self.fncachewarned = True | |
530 |
|
530 | |||
531 | if not len(fl) and (self.havecl or self.havemf): |
|
531 | if not len(fl) and (self.havecl or self.havemf): | |
532 | self._err(lr, _(b"empty or missing %s") % f) |
|
532 | self._err(lr, _(b"empty or missing %s") % f) | |
533 | else: |
|
533 | else: | |
534 | # Guard against implementations not setting this. |
|
534 | # Guard against implementations not setting this. | |
535 | state[b'skipread'] = set() |
|
535 | state[b'skipread'] = set() | |
536 | state[b'safe_renamed'] = set() |
|
536 | state[b'safe_renamed'] = set() | |
537 |
|
537 | |||
538 | for problem in fl.verifyintegrity(state): |
|
538 | for problem in fl.verifyintegrity(state): | |
539 | if problem.node is not None: |
|
539 | if problem.node is not None: | |
540 | linkrev = fl.linkrev(fl.rev(problem.node)) |
|
540 | linkrev = fl.linkrev(fl.rev(problem.node)) | |
541 | else: |
|
541 | else: | |
542 | linkrev = None |
|
542 | linkrev = None | |
543 |
|
543 | |||
544 | if problem.warning: |
|
544 | if problem.warning: | |
545 | self._warn(problem.warning) |
|
545 | self._warn(problem.warning) | |
546 | elif problem.error: |
|
546 | elif problem.error: | |
547 | self._err( |
|
547 | self._err( | |
548 | linkrev if linkrev is not None else lr, |
|
548 | linkrev if linkrev is not None else lr, | |
549 | problem.error, |
|
549 | problem.error, | |
550 | f, |
|
550 | f, | |
551 | ) |
|
551 | ) | |
552 | else: |
|
552 | else: | |
553 | raise error.ProgrammingError( |
|
553 | raise error.ProgrammingError( | |
554 | b'problem instance does not set warning or error ' |
|
554 | b'problem instance does not set warning or error ' | |
555 | b'attribute: %s' % problem.msg |
|
555 | b'attribute: %s' % problem.msg | |
556 | ) |
|
556 | ) | |
557 |
|
557 | |||
558 | seen = {} |
|
558 | seen = {} | |
559 | for i in fl: |
|
559 | for i in fl: | |
560 | revisions += 1 |
|
560 | revisions += 1 | |
561 | n = fl.node(i) |
|
561 | n = fl.node(i) | |
562 | lr = self._checkentry(fl, i, n, seen, linkrevs, f) |
|
562 | lr = self._checkentry(fl, i, n, seen, linkrevs, f) | |
563 | if f in filenodes: |
|
563 | if f in filenodes: | |
564 | if havemf and n not in filenodes[f]: |
|
564 | if havemf and n not in filenodes[f]: | |
565 | self._err(lr, _(b"%s not in manifests") % (short(n)), f) |
|
565 | self._err(lr, _(b"%s not in manifests") % (short(n)), f) | |
566 | else: |
|
566 | else: | |
567 | del filenodes[f][n] |
|
567 | del filenodes[f][n] | |
568 |
|
568 | |||
569 | if n in state[b'skipread'] and n not in state[b'safe_renamed']: |
|
569 | if n in state[b'skipread'] and n not in state[b'safe_renamed']: | |
570 | continue |
|
570 | continue | |
571 |
|
571 | |||
572 | # check renames |
|
572 | # check renames | |
573 | try: |
|
573 | try: | |
574 | # This requires resolving fulltext (at least on revlogs, |
|
574 | # This requires resolving fulltext (at least on revlogs, | |
575 | # though not with LFS revisions). We may want |
|
575 | # though not with LFS revisions). We may want | |
576 | # ``verifyintegrity()`` to pass a set of nodes with |
|
576 | # ``verifyintegrity()`` to pass a set of nodes with | |
577 | # rename metadata as an optimization. |
|
577 | # rename metadata as an optimization. | |
578 | rp = fl.renamed(n) |
|
578 | rp = fl.renamed(n) | |
579 | if rp: |
|
579 | if rp: | |
580 | if lr is not None and ui.verbose: |
|
580 | if lr is not None and ui.verbose: | |
581 | ctx = lrugetctx(lr) |
|
581 | ctx = lrugetctx(lr) | |
582 | if not any(rp[0] in pctx for pctx in ctx.parents()): |
|
582 | if not any(rp[0] in pctx for pctx in ctx.parents()): | |
583 | self._warn( |
|
583 | self._warn( | |
584 | _( |
|
584 | _( | |
585 | b"warning: copy source of '%s' not" |
|
585 | b"warning: copy source of '%s' not" | |
586 | b" in parents of %s" |
|
586 | b" in parents of %s" | |
587 | ) |
|
587 | ) | |
588 | % (f, ctx) |
|
588 | % (f, ctx) | |
589 | ) |
|
589 | ) | |
590 | fl2 = repo.file(rp[0]) |
|
590 | fl2 = repo.file(rp[0]) | |
591 | if not len(fl2): |
|
591 | if not len(fl2): | |
592 | self._err( |
|
592 | self._err( | |
593 | lr, |
|
593 | lr, | |
594 | _( |
|
594 | _( | |
595 | b"empty or missing copy source revlog " |
|
595 | b"empty or missing copy source revlog " | |
596 | b"%s:%s" |
|
596 | b"%s:%s" | |
597 | ) |
|
597 | ) | |
598 | % (rp[0], short(rp[1])), |
|
598 | % (rp[0], short(rp[1])), | |
599 | f, |
|
599 | f, | |
600 | ) |
|
600 | ) | |
601 | elif rp[1] == nullid: |
|
601 | elif rp[1] == nullid: | |
602 | ui.note( |
|
602 | ui.note( | |
603 | _( |
|
603 | _( | |
604 | b"warning: %s@%s: copy source" |
|
604 | b"warning: %s@%s: copy source" | |
605 | b" revision is nullid %s:%s\n" |
|
605 | b" revision is nullid %s:%s\n" | |
606 | ) |
|
606 | ) | |
607 | % (f, lr, rp[0], short(rp[1])) |
|
607 | % (f, lr, rp[0], short(rp[1])) | |
608 | ) |
|
608 | ) | |
609 | else: |
|
609 | else: | |
610 | fl2.rev(rp[1]) |
|
610 | fl2.rev(rp[1]) | |
611 | except Exception as inst: |
|
611 | except Exception as inst: | |
612 | self._exc( |
|
612 | self._exc( | |
613 | lr, _(b"checking rename of %s") % short(n), inst, f |
|
613 | lr, _(b"checking rename of %s") % short(n), inst, f | |
614 | ) |
|
614 | ) | |
615 |
|
615 | |||
616 | # cross-check |
|
616 | # cross-check | |
617 | if f in filenodes: |
|
617 | if f in filenodes: | |
618 | fns = [(v, k) for k, v in pycompat.iteritems(filenodes[f])] |
|
618 | fns = [(v, k) for k, v in pycompat.iteritems(filenodes[f])] | |
619 | for lr, node in sorted(fns): |
|
619 | for lr, node in sorted(fns): | |
620 | self._err( |
|
620 | self._err( | |
621 | lr, |
|
621 | lr, | |
622 | _(b"manifest refers to unknown revision %s") |
|
622 | _(b"manifest refers to unknown revision %s") | |
623 | % short(node), |
|
623 | % short(node), | |
624 | f, |
|
624 | f, | |
625 | ) |
|
625 | ) | |
626 | progress.complete() |
|
626 | progress.complete() | |
627 |
|
627 | |||
628 | if self.warnorphanstorefiles: |
|
628 | if self.warnorphanstorefiles: | |
629 | for f in sorted(storefiles): |
|
629 | for f in sorted(storefiles): | |
630 | self._warn(_(b"warning: orphan data file '%s'") % f) |
|
630 | self._warn(_(b"warning: orphan data file '%s'") % f) | |
631 |
|
631 | |||
632 | return len(files), revisions |
|
632 | return len(files), revisions |
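The substantive change in this file is that repo.store.datafiles() now yields a four-element tuple, with a revlog-type field placed before the previous (decoded name, encoded name, size) triple. A hedged sketch of how a caller adapts, mirroring the two loops changed above; it assumes the surrounding repo and _normpath() from this file, is simplified (the real loops also consult size and the revlog version), and assumes nothing about the new field beyond what the diff shows:

    storefiles = set()
    for rl_type, f, f2, size in repo.store.datafiles():
        # rl_type is the newly added leading field; these loops do not use it.
        if not f:
            # f could not be decoded; f2 is the stored (encoded) name.
            continue
        if f.startswith(b'data/'):
            storefiles.add(_normpath(f))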
@@ -1,1615 +1,1616 | |||||
1 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
1 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> | |
2 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
2 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> | |
3 | # |
|
3 | # | |
4 | # This software may be used and distributed according to the terms of the |
|
4 | # This software may be used and distributed according to the terms of the | |
5 | # GNU General Public License version 2 or any later version. |
|
5 | # GNU General Public License version 2 or any later version. | |
6 |
|
6 | |||
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import collections |
|
9 | import collections | |
10 | import contextlib |
|
10 | import contextlib | |
11 |
|
11 | |||
12 | from .i18n import _ |
|
12 | from .i18n import _ | |
13 | from .node import ( |
|
13 | from .node import ( | |
14 | hex, |
|
14 | hex, | |
15 | nullid, |
|
15 | nullid, | |
16 | ) |
|
16 | ) | |
17 | from . import ( |
|
17 | from . import ( | |
18 | discovery, |
|
18 | discovery, | |
19 | encoding, |
|
19 | encoding, | |
20 | error, |
|
20 | error, | |
21 | match as matchmod, |
|
21 | match as matchmod, | |
22 | narrowspec, |
|
22 | narrowspec, | |
23 | pycompat, |
|
23 | pycompat, | |
24 | streamclone, |
|
24 | streamclone, | |
25 | templatefilters, |
|
25 | templatefilters, | |
26 | util, |
|
26 | util, | |
27 | wireprotoframing, |
|
27 | wireprotoframing, | |
28 | wireprototypes, |
|
28 | wireprototypes, | |
29 | ) |
|
29 | ) | |
30 | from .interfaces import util as interfaceutil |
|
30 | from .interfaces import util as interfaceutil | |
31 | from .utils import ( |
|
31 | from .utils import ( | |
32 | cborutil, |
|
32 | cborutil, | |
33 | hashutil, |
|
33 | hashutil, | |
34 | stringutil, |
|
34 | stringutil, | |
35 | ) |
|
35 | ) | |
36 |
|
36 | |||
37 | FRAMINGTYPE = b'application/mercurial-exp-framing-0006' |
|
37 | FRAMINGTYPE = b'application/mercurial-exp-framing-0006' | |
38 |
|
38 | |||
39 | HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2 |
|
39 | HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2 | |
40 |
|
40 | |||
41 | COMMANDS = wireprototypes.commanddict() |
|
41 | COMMANDS = wireprototypes.commanddict() | |
42 |
|
42 | |||
43 | # Value inserted into the cache key computation function. Change the value to |
|
43 | # Value inserted into the cache key computation function. Change the value to | |
44 | # force new cache keys for every command request. This should be done when |
|
44 | # force new cache keys for every command request. This should be done when | |
45 | # there is a change to how caching works, etc. |
|
45 | # there is a change to how caching works, etc. | |
46 | GLOBAL_CACHE_VERSION = 1 |
|
46 | GLOBAL_CACHE_VERSION = 1 | |
47 |
|
47 | |||
48 |
|
48 | |||
49 | def handlehttpv2request(rctx, req, res, checkperm, urlparts): |
|
49 | def handlehttpv2request(rctx, req, res, checkperm, urlparts): | |
50 | from .hgweb import common as hgwebcommon |
|
50 | from .hgweb import common as hgwebcommon | |
51 |
|
51 | |||
52 | # URL space looks like: <permissions>/<command>, where <permission> can |
|
52 | # URL space looks like: <permissions>/<command>, where <permission> can | |
53 | # be ``ro`` or ``rw`` to signal read-only or read-write, respectively. |
|
53 | # be ``ro`` or ``rw`` to signal read-only or read-write, respectively. | |
54 |
|
54 | |||
55 | # Root URL does nothing meaningful... yet. |
|
55 | # Root URL does nothing meaningful... yet. | |
56 | if not urlparts: |
|
56 | if not urlparts: | |
57 | res.status = b'200 OK' |
|
57 | res.status = b'200 OK' | |
58 | res.headers[b'Content-Type'] = b'text/plain' |
|
58 | res.headers[b'Content-Type'] = b'text/plain' | |
59 | res.setbodybytes(_(b'HTTP version 2 API handler')) |
|
59 | res.setbodybytes(_(b'HTTP version 2 API handler')) | |
60 | return |
|
60 | return | |
61 |
|
61 | |||
62 | if len(urlparts) == 1: |
|
62 | if len(urlparts) == 1: | |
63 | res.status = b'404 Not Found' |
|
63 | res.status = b'404 Not Found' | |
64 | res.headers[b'Content-Type'] = b'text/plain' |
|
64 | res.headers[b'Content-Type'] = b'text/plain' | |
65 | res.setbodybytes( |
|
65 | res.setbodybytes( | |
66 | _(b'do not know how to process %s\n') % req.dispatchpath |
|
66 | _(b'do not know how to process %s\n') % req.dispatchpath | |
67 | ) |
|
67 | ) | |
68 | return |
|
68 | return | |
69 |
|
69 | |||
70 | permission, command = urlparts[0:2] |
|
70 | permission, command = urlparts[0:2] | |
71 |
|
71 | |||
72 | if permission not in (b'ro', b'rw'): |
|
72 | if permission not in (b'ro', b'rw'): | |
73 | res.status = b'404 Not Found' |
|
73 | res.status = b'404 Not Found' | |
74 | res.headers[b'Content-Type'] = b'text/plain' |
|
74 | res.headers[b'Content-Type'] = b'text/plain' | |
75 | res.setbodybytes(_(b'unknown permission: %s') % permission) |
|
75 | res.setbodybytes(_(b'unknown permission: %s') % permission) | |
76 | return |
|
76 | return | |
77 |
|
77 | |||
78 | if req.method != b'POST': |
|
78 | if req.method != b'POST': | |
79 | res.status = b'405 Method Not Allowed' |
|
79 | res.status = b'405 Method Not Allowed' | |
80 | res.headers[b'Allow'] = b'POST' |
|
80 | res.headers[b'Allow'] = b'POST' | |
81 | res.setbodybytes(_(b'commands require POST requests')) |
|
81 | res.setbodybytes(_(b'commands require POST requests')) | |
82 | return |
|
82 | return | |
83 |
|
83 | |||
84 | # At some point we'll want to use our own API instead of recycling the |
|
84 | # At some point we'll want to use our own API instead of recycling the | |
85 | # behavior of version 1 of the wire protocol... |
|
85 | # behavior of version 1 of the wire protocol... | |
86 | # TODO return reasonable responses - not responses that overload the |
|
86 | # TODO return reasonable responses - not responses that overload the | |
87 | # HTTP status line message for error reporting. |
|
87 | # HTTP status line message for error reporting. | |
88 | try: |
|
88 | try: | |
89 | checkperm(rctx, req, b'pull' if permission == b'ro' else b'push') |
|
89 | checkperm(rctx, req, b'pull' if permission == b'ro' else b'push') | |
90 | except hgwebcommon.ErrorResponse as e: |
|
90 | except hgwebcommon.ErrorResponse as e: | |
91 | res.status = hgwebcommon.statusmessage( |
|
91 | res.status = hgwebcommon.statusmessage( | |
92 | e.code, stringutil.forcebytestr(e) |
|
92 | e.code, stringutil.forcebytestr(e) | |
93 | ) |
|
93 | ) | |
94 | for k, v in e.headers: |
|
94 | for k, v in e.headers: | |
95 | res.headers[k] = v |
|
95 | res.headers[k] = v | |
96 | res.setbodybytes(b'permission denied') |
|
96 | res.setbodybytes(b'permission denied') | |
97 | return |
|
97 | return | |
98 |
|
98 | |||
99 | # We have a special endpoint to reflect the request back at the client. |
|
99 | # We have a special endpoint to reflect the request back at the client. | |
100 | if command == b'debugreflect': |
|
100 | if command == b'debugreflect': | |
101 | _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res) |
|
101 | _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res) | |
102 | return |
|
102 | return | |
103 |
|
103 | |||
104 | # Extra commands that we handle that aren't really wire protocol |
|
104 | # Extra commands that we handle that aren't really wire protocol | |
105 | # commands. Think extra hard before making this hackery available to |
|
105 | # commands. Think extra hard before making this hackery available to | |
106 | # extensions. |
|
106 | # extensions. | |
107 | extracommands = {b'multirequest'} |
|
107 | extracommands = {b'multirequest'} | |
108 |
|
108 | |||
109 | if command not in COMMANDS and command not in extracommands: |
|
109 | if command not in COMMANDS and command not in extracommands: | |
110 | res.status = b'404 Not Found' |
|
110 | res.status = b'404 Not Found' | |
111 | res.headers[b'Content-Type'] = b'text/plain' |
|
111 | res.headers[b'Content-Type'] = b'text/plain' | |
112 | res.setbodybytes(_(b'unknown wire protocol command: %s\n') % command) |
|
112 | res.setbodybytes(_(b'unknown wire protocol command: %s\n') % command) | |
113 | return |
|
113 | return | |
114 |
|
114 | |||
115 | repo = rctx.repo |
|
115 | repo = rctx.repo | |
116 | ui = repo.ui |
|
116 | ui = repo.ui | |
117 |
|
117 | |||
118 | proto = httpv2protocolhandler(req, ui) |
|
118 | proto = httpv2protocolhandler(req, ui) | |
119 |
|
119 | |||
120 | if ( |
|
120 | if ( | |
121 | not COMMANDS.commandavailable(command, proto) |
|
121 | not COMMANDS.commandavailable(command, proto) | |
122 | and command not in extracommands |
|
122 | and command not in extracommands | |
123 | ): |
|
123 | ): | |
124 | res.status = b'404 Not Found' |
|
124 | res.status = b'404 Not Found' | |
125 | res.headers[b'Content-Type'] = b'text/plain' |
|
125 | res.headers[b'Content-Type'] = b'text/plain' | |
126 | res.setbodybytes(_(b'invalid wire protocol command: %s') % command) |
|
126 | res.setbodybytes(_(b'invalid wire protocol command: %s') % command) | |
127 | return |
|
127 | return | |
128 |
|
128 | |||
129 | # TODO consider cases where proxies may add additional Accept headers. |
|
129 | # TODO consider cases where proxies may add additional Accept headers. | |
130 | if req.headers.get(b'Accept') != FRAMINGTYPE: |
|
130 | if req.headers.get(b'Accept') != FRAMINGTYPE: | |
131 | res.status = b'406 Not Acceptable' |
|
131 | res.status = b'406 Not Acceptable' | |
132 | res.headers[b'Content-Type'] = b'text/plain' |
|
132 | res.headers[b'Content-Type'] = b'text/plain' | |
133 | res.setbodybytes( |
|
133 | res.setbodybytes( | |
134 | _(b'client MUST specify Accept header with value: %s\n') |
|
134 | _(b'client MUST specify Accept header with value: %s\n') | |
135 | % FRAMINGTYPE |
|
135 | % FRAMINGTYPE | |
136 | ) |
|
136 | ) | |
137 | return |
|
137 | return | |
138 |
|
138 | |||
139 | if req.headers.get(b'Content-Type') != FRAMINGTYPE: |
|
139 | if req.headers.get(b'Content-Type') != FRAMINGTYPE: | |
140 | res.status = b'415 Unsupported Media Type' |
|
140 | res.status = b'415 Unsupported Media Type' | |
141 | # TODO we should send a response with appropriate media type, |
|
141 | # TODO we should send a response with appropriate media type, | |
142 | # since the client does Accept it. |
|
142 | # since the client does Accept it. | |
143 | res.headers[b'Content-Type'] = b'text/plain' |
|
143 | res.headers[b'Content-Type'] = b'text/plain' | |
144 | res.setbodybytes( |
|
144 | res.setbodybytes( | |
145 | _(b'client MUST send Content-Type header with value: %s\n') |
|
145 | _(b'client MUST send Content-Type header with value: %s\n') | |
146 | % FRAMINGTYPE |
|
146 | % FRAMINGTYPE | |
147 | ) |
|
147 | ) | |
148 | return |
|
148 | return | |
149 |
|
149 | |||
150 | _processhttpv2request(ui, repo, req, res, permission, command, proto) |
|
150 | _processhttpv2request(ui, repo, req, res, permission, command, proto) | |
151 |
|
151 | |||
152 |
|
152 | |||
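Per the validation steps above, a request that reaches _processhttpv2request() must be a POST to a <permission>/<command> URL with the framing media type in both the Accept and Content-Type headers. A small sketch of what such a request looks like from the client side; the command name here is made up, only the shape matters:

    method = b'POST'
    urlparts = [b'ro', b'somecommand']  # <permission>/<command>
    headers = {
        b'Accept': FRAMINGTYPE,         # b'application/mercurial-exp-framing-0006'
        b'Content-Type': FRAMINGTYPE,
    }
    # 'ro' URLs are checked against the 'pull' permission, 'rw' against 'push'.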
153 | def _processhttpv2reflectrequest(ui, repo, req, res): |
|
153 | def _processhttpv2reflectrequest(ui, repo, req, res): | |
154 | """Reads unified frame protocol request and dumps out state to client. |
|
154 | """Reads unified frame protocol request and dumps out state to client. | |
155 |
|
155 | |||
156 | This special endpoint can be used to help debug the wire protocol. |
|
156 | This special endpoint can be used to help debug the wire protocol. | |
157 |
|
157 | |||
158 | Instead of routing the request through the normal dispatch mechanism, |
|
158 | Instead of routing the request through the normal dispatch mechanism, | |
159 | we instead read all frames, decode them, and feed them into our state |
|
159 | we instead read all frames, decode them, and feed them into our state | |
160 | tracker. We then dump the log of all that activity back out to the |
|
160 | tracker. We then dump the log of all that activity back out to the | |
161 | client. |
|
161 | client. | |
162 | """ |
|
162 | """ | |
163 | # Reflection APIs have a history of being abused, accidentally disclosing |
|
163 | # Reflection APIs have a history of being abused, accidentally disclosing | |
164 | # sensitive data, etc. So we have a config knob. |
|
164 | # sensitive data, etc. So we have a config knob. | |
165 | if not ui.configbool(b'experimental', b'web.api.debugreflect'): |
|
165 | if not ui.configbool(b'experimental', b'web.api.debugreflect'): | |
166 | res.status = b'404 Not Found' |
|
166 | res.status = b'404 Not Found' | |
167 | res.headers[b'Content-Type'] = b'text/plain' |
|
167 | res.headers[b'Content-Type'] = b'text/plain' | |
168 | res.setbodybytes(_(b'debugreflect service not available')) |
|
168 | res.setbodybytes(_(b'debugreflect service not available')) | |
169 | return |
|
169 | return | |
170 |
|
170 | |||
171 | # We assume we have a unified framing protocol request body. |
|
171 | # We assume we have a unified framing protocol request body. | |
172 |
|
172 | |||
173 | reactor = wireprotoframing.serverreactor(ui) |
|
173 | reactor = wireprotoframing.serverreactor(ui) | |
174 | states = [] |
|
174 | states = [] | |
175 |
|
175 | |||
176 | while True: |
|
176 | while True: | |
177 | frame = wireprotoframing.readframe(req.bodyfh) |
|
177 | frame = wireprotoframing.readframe(req.bodyfh) | |
178 |
|
178 | |||
179 | if not frame: |
|
179 | if not frame: | |
180 | states.append(b'received: <no frame>') |
|
180 | states.append(b'received: <no frame>') | |
181 | break |
|
181 | break | |
182 |
|
182 | |||
183 | states.append( |
|
183 | states.append( | |
184 | b'received: %d %d %d %s' |
|
184 | b'received: %d %d %d %s' | |
185 | % (frame.typeid, frame.flags, frame.requestid, frame.payload) |
|
185 | % (frame.typeid, frame.flags, frame.requestid, frame.payload) | |
186 | ) |
|
186 | ) | |
187 |
|
187 | |||
188 | action, meta = reactor.onframerecv(frame) |
|
188 | action, meta = reactor.onframerecv(frame) | |
189 | states.append(templatefilters.json((action, meta))) |
|
189 | states.append(templatefilters.json((action, meta))) | |
190 |
|
190 | |||
191 | action, meta = reactor.oninputeof() |
|
191 | action, meta = reactor.oninputeof() | |
192 | meta[b'action'] = action |
|
192 | meta[b'action'] = action | |
193 | states.append(templatefilters.json(meta)) |
|
193 | states.append(templatefilters.json(meta)) | |
194 |
|
194 | |||
195 | res.status = b'200 OK' |
|
195 | res.status = b'200 OK' | |
196 | res.headers[b'Content-Type'] = b'text/plain' |
|
196 | res.headers[b'Content-Type'] = b'text/plain' | |
197 | res.setbodybytes(b'\n'.join(states)) |
|
197 | res.setbodybytes(b'\n'.join(states)) | |
198 |
|
198 | |||
199 |
|
199 | |||
200 | def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto): |
|
200 | def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto): | |
201 | """Post-validation handler for HTTPv2 requests. |
|
201 | """Post-validation handler for HTTPv2 requests. | |
202 |
|
202 | |||
203 | Called when the HTTP request contains unified frame-based protocol |
|
203 | Called when the HTTP request contains unified frame-based protocol | |
204 | frames for evaluation. |
|
204 | frames for evaluation. | |
205 | """ |
|
205 | """ | |
206 | # TODO Some HTTP clients are full duplex and can receive data before |
|
206 | # TODO Some HTTP clients are full duplex and can receive data before | |
207 | # the entire request is transmitted. Figure out a way to indicate support |
|
207 | # the entire request is transmitted. Figure out a way to indicate support | |
208 | # for that so we can opt into full duplex mode. |
|
208 | # for that so we can opt into full duplex mode. | |
209 | reactor = wireprotoframing.serverreactor(ui, deferoutput=True) |
|
209 | reactor = wireprotoframing.serverreactor(ui, deferoutput=True) | |
210 | seencommand = False |
|
210 | seencommand = False | |
211 |
|
211 | |||
212 | outstream = None |
|
212 | outstream = None | |
213 |
|
213 | |||
214 | while True: |
|
214 | while True: | |
215 | frame = wireprotoframing.readframe(req.bodyfh) |
|
215 | frame = wireprotoframing.readframe(req.bodyfh) | |
216 | if not frame: |
|
216 | if not frame: | |
217 | break |
|
217 | break | |
218 |
|
218 | |||
219 | action, meta = reactor.onframerecv(frame) |
|
219 | action, meta = reactor.onframerecv(frame) | |
220 |
|
220 | |||
221 | if action == b'wantframe': |
|
221 | if action == b'wantframe': | |
222 | # Need more data before we can do anything. |
|
222 | # Need more data before we can do anything. | |
223 | continue |
|
223 | continue | |
224 | elif action == b'runcommand': |
|
224 | elif action == b'runcommand': | |
225 | # Defer creating output stream because we need to wait for |
|
225 | # Defer creating output stream because we need to wait for | |
226 | # protocol settings frames so proper encoding can be applied. |
|
226 | # protocol settings frames so proper encoding can be applied. | |
227 | if not outstream: |
|
227 | if not outstream: | |
228 | outstream = reactor.makeoutputstream() |
|
228 | outstream = reactor.makeoutputstream() | |
229 |
|
229 | |||
230 | sentoutput = _httpv2runcommand( |
|
230 | sentoutput = _httpv2runcommand( | |
231 | ui, |
|
231 | ui, | |
232 | repo, |
|
232 | repo, | |
233 | req, |
|
233 | req, | |
234 | res, |
|
234 | res, | |
235 | authedperm, |
|
235 | authedperm, | |
236 | reqcommand, |
|
236 | reqcommand, | |
237 | reactor, |
|
237 | reactor, | |
238 | outstream, |
|
238 | outstream, | |
239 | meta, |
|
239 | meta, | |
240 | issubsequent=seencommand, |
|
240 | issubsequent=seencommand, | |
241 | ) |
|
241 | ) | |
242 |
|
242 | |||
243 | if sentoutput: |
|
243 | if sentoutput: | |
244 | return |
|
244 | return | |
245 |
|
245 | |||
246 | seencommand = True |
|
246 | seencommand = True | |
247 |
|
247 | |||
248 | elif action == b'error': |
|
248 | elif action == b'error': | |
249 | # TODO define proper error mechanism. |
|
249 | # TODO define proper error mechanism. | |
250 | res.status = b'200 OK' |
|
250 | res.status = b'200 OK' | |
251 | res.headers[b'Content-Type'] = b'text/plain' |
|
251 | res.headers[b'Content-Type'] = b'text/plain' | |
252 | res.setbodybytes(meta[b'message'] + b'\n') |
|
252 | res.setbodybytes(meta[b'message'] + b'\n') | |
253 | return |
|
253 | return | |
254 | else: |
|
254 | else: | |
255 | raise error.ProgrammingError( |
|
255 | raise error.ProgrammingError( | |
256 | b'unhandled action from frame processor: %s' % action |
|
256 | b'unhandled action from frame processor: %s' % action | |
257 | ) |
|
257 | ) | |
258 |
|
258 | |||
259 | action, meta = reactor.oninputeof() |
|
259 | action, meta = reactor.oninputeof() | |
260 | if action == b'sendframes': |
|
260 | if action == b'sendframes': | |
261 | # We assume we haven't started sending the response yet. If we're |
|
261 | # We assume we haven't started sending the response yet. If we're | |
262 | # wrong, the response type will raise an exception. |
|
262 | # wrong, the response type will raise an exception. | |
263 | res.status = b'200 OK' |
|
263 | res.status = b'200 OK' | |
264 | res.headers[b'Content-Type'] = FRAMINGTYPE |
|
264 | res.headers[b'Content-Type'] = FRAMINGTYPE | |
265 | res.setbodygen(meta[b'framegen']) |
|
265 | res.setbodygen(meta[b'framegen']) | |
266 | elif action == b'noop': |
|
266 | elif action == b'noop': | |
267 | pass |
|
267 | pass | |
268 | else: |
|
268 | else: | |
269 | raise error.ProgrammingError( |
|
269 | raise error.ProgrammingError( | |
270 | b'unhandled action from frame processor: %s' % action |
|
270 | b'unhandled action from frame processor: %s' % action | |
271 | ) |
|
271 | ) | |
272 |
|
272 | |||
273 |
|
273 | |||
274 | def _httpv2runcommand( |
|
274 | def _httpv2runcommand( | |
275 | ui, |
|
275 | ui, | |
276 | repo, |
|
276 | repo, | |
277 | req, |
|
277 | req, | |
278 | res, |
|
278 | res, | |
279 | authedperm, |
|
279 | authedperm, | |
280 | reqcommand, |
|
280 | reqcommand, | |
281 | reactor, |
|
281 | reactor, | |
282 | outstream, |
|
282 | outstream, | |
283 | command, |
|
283 | command, | |
284 | issubsequent, |
|
284 | issubsequent, | |
285 | ): |
|
285 | ): | |
286 | """Dispatch a wire protocol command made from HTTPv2 requests. |
|
286 | """Dispatch a wire protocol command made from HTTPv2 requests. | |
287 |
|
287 | |||
288 | The authenticated permission (``authedperm``) along with the original |
|
288 | The authenticated permission (``authedperm``) along with the original | |
289 | command from the URL (``reqcommand``) are passed in. |
|
289 | command from the URL (``reqcommand``) are passed in. | |
290 | """ |
|
290 | """ | |
291 | # We already validated that the session has permissions to perform the |
|
291 | # We already validated that the session has permissions to perform the | |
292 | # actions in ``authedperm``. In the unified frame protocol, the canonical |
|
292 | # actions in ``authedperm``. In the unified frame protocol, the canonical | |
293 | # command to run is expressed in a frame. However, the URL also requested |
|
293 | # command to run is expressed in a frame. However, the URL also requested | |
294 | # to run a specific command. We need to be careful that the command we |
|
294 | # to run a specific command. We need to be careful that the command we | |
295 | # run doesn't have permissions requirements greater than what was granted |
|
295 | # run doesn't have permissions requirements greater than what was granted | |
296 | # by ``authedperm``. |
|
296 | # by ``authedperm``. | |
297 | # |
|
297 | # | |
298 | # Our rule for this is we only allow one command per HTTP request and |
|
298 | # Our rule for this is we only allow one command per HTTP request and | |
299 | # that command must match the command in the URL. However, we make |
|
299 | # that command must match the command in the URL. However, we make | |
300 | # an exception for the ``multirequest`` URL. This URL is allowed to |
|
300 | # an exception for the ``multirequest`` URL. This URL is allowed to | |
301 | # execute multiple commands. We double check permissions of each command |
|
301 | # execute multiple commands. We double check permissions of each command | |
302 | # as it is invoked to ensure there is no privilege escalation. |
|
302 | # as it is invoked to ensure there is no privilege escalation. | |
303 | # TODO consider allowing multiple commands to regular command URLs |
|
303 | # TODO consider allowing multiple commands to regular command URLs | |
304 | # iff each command is the same. |
|
304 | # iff each command is the same. | |
305 |
|
305 | |||
306 | proto = httpv2protocolhandler(req, ui, args=command[b'args']) |
|
306 | proto = httpv2protocolhandler(req, ui, args=command[b'args']) | |
307 |
|
307 | |||
308 | if reqcommand == b'multirequest': |
|
308 | if reqcommand == b'multirequest': | |
309 | if not COMMANDS.commandavailable(command[b'command'], proto): |
|
309 | if not COMMANDS.commandavailable(command[b'command'], proto): | |
310 | # TODO proper error mechanism |
|
310 | # TODO proper error mechanism | |
311 | res.status = b'200 OK' |
|
311 | res.status = b'200 OK' | |
312 | res.headers[b'Content-Type'] = b'text/plain' |
|
312 | res.headers[b'Content-Type'] = b'text/plain' | |
313 | res.setbodybytes( |
|
313 | res.setbodybytes( | |
314 | _(b'wire protocol command not available: %s') |
|
314 | _(b'wire protocol command not available: %s') | |
315 | % command[b'command'] |
|
315 | % command[b'command'] | |
316 | ) |
|
316 | ) | |
317 | return True |
|
317 | return True | |
318 |
|
318 | |||
319 | # TODO don't use assert here, since it may be elided by -O. |
|
319 | # TODO don't use assert here, since it may be elided by -O. | |
320 | assert authedperm in (b'ro', b'rw') |
|
320 | assert authedperm in (b'ro', b'rw') | |
321 | wirecommand = COMMANDS[command[b'command']] |
|
321 | wirecommand = COMMANDS[command[b'command']] | |
322 | assert wirecommand.permission in (b'push', b'pull') |
|
322 | assert wirecommand.permission in (b'push', b'pull') | |
323 |
|
323 | |||
324 | if authedperm == b'ro' and wirecommand.permission != b'pull': |
|
324 | if authedperm == b'ro' and wirecommand.permission != b'pull': | |
325 | # TODO proper error mechanism |
|
325 | # TODO proper error mechanism | |
326 | res.status = b'403 Forbidden' |
|
326 | res.status = b'403 Forbidden' | |
327 | res.headers[b'Content-Type'] = b'text/plain' |
|
327 | res.headers[b'Content-Type'] = b'text/plain' | |
328 | res.setbodybytes( |
|
328 | res.setbodybytes( | |
329 | _(b'insufficient permissions to execute command: %s') |
|
329 | _(b'insufficient permissions to execute command: %s') | |
330 | % command[b'command'] |
|
330 | % command[b'command'] | |
331 | ) |
|
331 | ) | |
332 | return True |
|
332 | return True | |
333 |
|
333 | |||
334 | # TODO should we also call checkperm() here? Maybe not if we're going |
|
334 | # TODO should we also call checkperm() here? Maybe not if we're going | |
335 | # to overhaul that API. The granted scope from the URL check should |
|
335 | # to overhaul that API. The granted scope from the URL check should | |
336 | # be good enough. |
|
336 | # be good enough. | |
337 |
|
337 | |||
338 | else: |
|
338 | else: | |
339 | # Don't allow multiple commands outside of ``multirequest`` URL. |
|
339 | # Don't allow multiple commands outside of ``multirequest`` URL. | |
340 | if issubsequent: |
|
340 | if issubsequent: | |
341 | # TODO proper error mechanism |
|
341 | # TODO proper error mechanism | |
342 | res.status = b'200 OK' |
|
342 | res.status = b'200 OK' | |
343 | res.headers[b'Content-Type'] = b'text/plain' |
|
343 | res.headers[b'Content-Type'] = b'text/plain' | |
344 | res.setbodybytes( |
|
344 | res.setbodybytes( | |
345 | _(b'multiple commands cannot be issued to this URL') |
|
345 | _(b'multiple commands cannot be issued to this URL') | |
346 | ) |
|
346 | ) | |
347 | return True |
|
347 | return True | |
348 |
|
348 | |||
349 | if reqcommand != command[b'command']: |
|
349 | if reqcommand != command[b'command']: | |
350 | # TODO define proper error mechanism |
|
350 | # TODO define proper error mechanism | |
351 | res.status = b'200 OK' |
|
351 | res.status = b'200 OK' | |
352 | res.headers[b'Content-Type'] = b'text/plain' |
|
352 | res.headers[b'Content-Type'] = b'text/plain' | |
353 | res.setbodybytes(_(b'command in frame must match command in URL')) |
|
353 | res.setbodybytes(_(b'command in frame must match command in URL')) | |
354 | return True |
|
354 | return True | |
355 |
|
355 | |||
356 | res.status = b'200 OK' |
|
356 | res.status = b'200 OK' | |
357 | res.headers[b'Content-Type'] = FRAMINGTYPE |
|
357 | res.headers[b'Content-Type'] = FRAMINGTYPE | |
358 |
|
358 | |||
359 | try: |
|
359 | try: | |
360 | objs = dispatch(repo, proto, command[b'command'], command[b'redirect']) |
|
360 | objs = dispatch(repo, proto, command[b'command'], command[b'redirect']) | |
361 |
|
361 | |||
362 | action, meta = reactor.oncommandresponsereadyobjects( |
|
362 | action, meta = reactor.oncommandresponsereadyobjects( | |
363 | outstream, command[b'requestid'], objs |
|
363 | outstream, command[b'requestid'], objs | |
364 | ) |
|
364 | ) | |
365 |
|
365 | |||
366 | except error.WireprotoCommandError as e: |
|
366 | except error.WireprotoCommandError as e: | |
367 | action, meta = reactor.oncommanderror( |
|
367 | action, meta = reactor.oncommanderror( | |
368 | outstream, command[b'requestid'], e.message, e.messageargs |
|
368 | outstream, command[b'requestid'], e.message, e.messageargs | |
369 | ) |
|
369 | ) | |
370 |
|
370 | |||
371 | except Exception as e: |
|
371 | except Exception as e: | |
372 | action, meta = reactor.onservererror( |
|
372 | action, meta = reactor.onservererror( | |
373 | outstream, |
|
373 | outstream, | |
374 | command[b'requestid'], |
|
374 | command[b'requestid'], | |
375 | _(b'exception when invoking command: %s') |
|
375 | _(b'exception when invoking command: %s') | |
376 | % stringutil.forcebytestr(e), |
|
376 | % stringutil.forcebytestr(e), | |
377 | ) |
|
377 | ) | |
378 |
|
378 | |||
379 | if action == b'sendframes': |
|
379 | if action == b'sendframes': | |
380 | res.setbodygen(meta[b'framegen']) |
|
380 | res.setbodygen(meta[b'framegen']) | |
381 | return True |
|
381 | return True | |
382 | elif action == b'noop': |
|
382 | elif action == b'noop': | |
383 | return False |
|
383 | return False | |
384 | else: |
|
384 | else: | |
385 | raise error.ProgrammingError( |
|
385 | raise error.ProgrammingError( | |
386 | b'unhandled event from reactor: %s' % action |
|
386 | b'unhandled event from reactor: %s' % action | |
387 | ) |
|
387 | ) | |
388 |
|
388 | |||
389 |
|
389 | |||
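The long comment in _httpv2runcommand() about permissions reduces to a small predicate: a session authenticated for 'ro' may only invoke commands declaring the 'pull' permission, while an 'rw' session may invoke both 'pull' and 'push' commands. A sketch of that rule in isolation; this is illustrative only, since the real code answers with an HTTP 403 rather than a boolean:

    def allowed(authedperm, commandperm):
        # authedperm comes from the URL ('ro' or 'rw'); commandperm is the
        # permission declared by the wire protocol command ('pull' or 'push').
        assert authedperm in (b'ro', b'rw')
        assert commandperm in (b'push', b'pull')
        if authedperm == b'ro':
            return commandperm == b'pull'
        return True  # 'rw' sessions may run both pull and push commands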
390 | def getdispatchrepo(repo, proto, command): |
|
390 | def getdispatchrepo(repo, proto, command): | |
391 | viewconfig = repo.ui.config(b'server', b'view') |
|
391 | viewconfig = repo.ui.config(b'server', b'view') | |
392 | return repo.filtered(viewconfig) |
|
392 | return repo.filtered(viewconfig) | |
393 |
|
393 | |||
394 |
|
394 | |||
395 | def dispatch(repo, proto, command, redirect): |
|
395 | def dispatch(repo, proto, command, redirect): | |
396 | """Run a wire protocol command. |
|
396 | """Run a wire protocol command. | |
397 |
|
397 | |||
398 | Returns an iterable of objects that will be sent to the client. |
|
398 | Returns an iterable of objects that will be sent to the client. | |
399 | """ |
|
399 | """ | |
400 | repo = getdispatchrepo(repo, proto, command) |
|
400 | repo = getdispatchrepo(repo, proto, command) | |
401 |
|
401 | |||
402 | entry = COMMANDS[command] |
|
402 | entry = COMMANDS[command] | |
403 | func = entry.func |
|
403 | func = entry.func | |
404 | spec = entry.args |
|
404 | spec = entry.args | |
405 |
|
405 | |||
406 | args = proto.getargs(spec) |
|
    args = proto.getargs(spec)

    # There is some duplicate boilerplate code here for calling the command and
    # emitting objects. It is either that or a lot of indented code that looks
    # like a pyramid (since there are a lot of code paths that result in not
    # using the cacher).
    callcommand = lambda: func(repo, proto, **pycompat.strkwargs(args))

    # Request is not cacheable. Don't bother instantiating a cacher.
    if not entry.cachekeyfn:
        for o in callcommand():
            yield o
        return

    if redirect:
        redirecttargets = redirect[b'targets']
        redirecthashes = redirect[b'hashes']
    else:
        redirecttargets = []
        redirecthashes = []

    cacher = makeresponsecacher(
        repo,
        proto,
        command,
        args,
        cborutil.streamencode,
        redirecttargets=redirecttargets,
        redirecthashes=redirecthashes,
    )

    # But we have no cacher. Do default handling.
    if not cacher:
        for o in callcommand():
            yield o
        return

    with cacher:
        cachekey = entry.cachekeyfn(
            repo, proto, cacher, **pycompat.strkwargs(args)
        )

        # No cache key or the cacher doesn't like it. Do default handling.
        if cachekey is None or not cacher.setcachekey(cachekey):
            for o in callcommand():
                yield o
            return

        # Serve it from the cache, if possible.
        cached = cacher.lookup()

        if cached:
            for o in cached[b'objs']:
                yield o
            return

        # Else call the command and feed its output into the cacher, allowing
        # the cacher to buffer/mutate objects as it desires.
        for o in callcommand():
            for o in cacher.onobject(o):
                yield o

        for o in cacher.onfinished():
            yield o

@interfaceutil.implementer(wireprototypes.baseprotocolhandler)
class httpv2protocolhandler(object):
    def __init__(self, req, ui, args=None):
        self._req = req
        self._ui = ui
        self._args = args

    @property
    def name(self):
        return HTTP_WIREPROTO_V2

    def getargs(self, args):
        # First look for args that were passed but aren't registered on this
        # command.
        extra = set(self._args) - set(args)
        if extra:
            raise error.WireprotoCommandError(
                b'unsupported argument to command: %s'
                % b', '.join(sorted(extra))
            )

        # And look for required arguments that are missing.
        missing = {a for a in args if args[a][b'required']} - set(self._args)

        if missing:
            raise error.WireprotoCommandError(
                b'missing required arguments: %s' % b', '.join(sorted(missing))
            )

        # Now derive the arguments to pass to the command, taking into
        # account the arguments specified by the client.
        data = {}
        for k, meta in sorted(args.items()):
            # This argument wasn't passed by the client.
            if k not in self._args:
                data[k] = meta[b'default']()
                continue

            v = self._args[k]

            # Sets may be expressed as lists. Silently normalize.
            if meta[b'type'] == b'set' and isinstance(v, list):
                v = set(v)

            # TODO consider more/stronger type validation.

            data[k] = v

        return data

    def getprotocaps(self):
        # Protocol capabilities are currently not implemented for HTTP V2.
        return set()

    def getpayload(self):
        raise NotImplementedError

    @contextlib.contextmanager
    def mayberedirectstdio(self):
        raise NotImplementedError

    def client(self):
        raise NotImplementedError

    def addcapabilities(self, repo, caps):
        return caps

    def checkperm(self, perm):
        raise NotImplementedError

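
# The following is an illustrative sketch, not part of the original module: it
# shows how ``getargs`` above normalizes client-supplied arguments against a
# command's argument spec. The spec and values are made up; ``req`` and ``ui``
# are unused by ``getargs``, so ``None`` placeholders suffice here. The helper
# is never called anywhere in this module.
def _demo_getargs_normalization():
    spec = {
        b'path': {b'type': b'bytes', b'required': True, b'default': lambda: None},
        b'fields': {b'type': b'set', b'required': False, b'default': set},
    }
    # Simulate a client that supplied the required ``path`` and expressed the
    # set-typed ``fields`` argument as a list.
    handler = httpv2protocolhandler(
        None, None, args={b'path': b'foo.txt', b'fields': [b'parents']}
    )
    data = handler.getargs(spec)
    assert data[b'path'] == b'foo.txt'
    # Lists are silently coerced to sets for ``set``-typed arguments.
    assert data[b'fields'] == {b'parents'}
    return data
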
def httpv2apidescriptor(req, repo):
    proto = httpv2protocolhandler(req, repo.ui)

    return _capabilitiesv2(repo, proto)


def _capabilitiesv2(repo, proto):
    """Obtain the set of capabilities for version 2 transports.

    These capabilities are distinct from the capabilities for version 1
    transports.
    """
    caps = {
        b'commands': {},
        b'framingmediatypes': [FRAMINGTYPE],
        b'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
    }

    for command, entry in COMMANDS.items():
        args = {}

        for arg, meta in entry.args.items():
            args[arg] = {
                # TODO should this be a normalized type using CBOR's
                # terminology?
                b'type': meta[b'type'],
                b'required': meta[b'required'],
            }

            if not meta[b'required']:
                args[arg][b'default'] = meta[b'default']()

            if meta[b'validvalues']:
                args[arg][b'validvalues'] = meta[b'validvalues']

        # TODO this type of check should be defined in a per-command callback.
        if (
            command == b'rawstorefiledata'
            and not streamclone.allowservergeneration(repo)
        ):
            continue

        caps[b'commands'][command] = {
            b'args': args,
            b'permissions': [entry.permission],
        }

        if entry.extracapabilitiesfn:
            extracaps = entry.extracapabilitiesfn(repo, proto)
            caps[b'commands'][command].update(extracaps)

    caps[b'rawrepoformats'] = sorted(repo.requirements & repo.supportedformats)

    targets = getadvertisedredirecttargets(repo, proto)
    if targets:
        caps[b'redirect'] = {
            b'targets': [],
            b'hashes': [b'sha256', b'sha1'],
        }

        for target in targets:
            entry = {
                b'name': target[b'name'],
                b'protocol': target[b'protocol'],
                b'uris': target[b'uris'],
            }

            for key in (b'snirequired', b'tlsversions'):
                if key in target:
                    entry[key] = target[key]

            caps[b'redirect'][b'targets'].append(entry)

    return proto.addcapabilities(repo, caps)

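
# For orientation (values below are illustrative, not produced by any specific
# repository), the structure returned by _capabilitiesv2() above looks roughly
# like:
#
#   {
#     b'commands': {
#       b'changesetdata': {
#         b'args': {
#           b'fields': {b'type': b'set', b'required': False,
#                       b'default': set(),
#                       b'validvalues': {b'bookmarks', b'parents', b'phase',
#                                        b'revision'}},
#           b'revisions': {b'type': b'list', b'required': True},
#         },
#         b'permissions': [b'pull'],
#       },
#       # ... one entry per registered command ...
#     },
#     b'framingmediatypes': [FRAMINGTYPE],
#     b'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
#     b'rawrepoformats': [b'generaldelta', b'revlogv1'],
#     # b'redirect' is only present when getadvertisedredirecttargets()
#     # returns a non-empty list.
#   }
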
def getadvertisedredirecttargets(repo, proto):
    """Obtain a list of content redirect targets.

    Returns a list containing potential redirect targets that will be
    advertised in capabilities data. Each dict MUST have the following
    keys:

    name
       The name of this redirect target. This is the identifier clients use
       to refer to a target. It is transferred as part of every command
       request.

    protocol
       Network protocol used by this target. Typically this is the string
       in front of the ``://`` in a URL. e.g. ``https``.

    uris
       List of representative URIs for this target. Clients can use the
       URIs to test parsing for compatibility or for ordering preference
       for which target to use.

    The following optional keys are recognized:

    snirequired
       Bool indicating if Server Name Indication (SNI) is required to
       connect to this target.

    tlsversions
       List of bytes indicating which TLS versions are supported by this
       target.

    By default, clients reflect the target order advertised by servers
    and servers will use the first client-advertised target when picking
    a redirect target. So targets should be advertised in the order the
    server prefers they be used.
    """
    return []

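
# A hypothetical sketch of what an extension might return if it wrapped or
# replaced getadvertisedredirecttargets() above to advertise a CDN mirror.
# The host names and values are invented; only the key names come from the
# docstring above. Nothing in this module calls this helper.
def _example_redirect_targets(repo, proto):
    return [
        {
            b'name': b'cdn',
            b'protocol': b'https',
            b'uris': [b'https://cdn.example.com/'],
            # Optional keys:
            b'snirequired': True,
            b'tlsversions': [b'1.2', b'1.3'],
        }
    ]
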
def wireprotocommand(
    name,
    args=None,
    permission=b'push',
    cachekeyfn=None,
    extracapabilitiesfn=None,
):
    """Decorator to declare a wire protocol command.

    ``name`` is the name of the wire protocol command being provided.

    ``args`` is a dict defining arguments accepted by the command. Keys are
    the argument name. Values are dicts with the following keys:

       ``type``
          The argument data type. Must be one of the following string
          literals: ``bytes``, ``int``, ``list``, ``dict``, ``set``,
          or ``bool``.

       ``default``
          A callable returning the default value for this argument. If not
          specified, ``None`` will be the default value.

       ``example``
          An example value for this argument.

       ``validvalues``
          Set of recognized values for this argument.

    ``permission`` defines the permission type needed to run this command.
    Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
    respectively. The default is to assume the command requires ``push``
    permissions because otherwise commands not declaring their permissions
    could modify a repository that is supposed to be read-only.

    ``cachekeyfn`` defines an optional callable that can derive the
    cache key for this request.

    ``extracapabilitiesfn`` defines an optional callable that defines extra
    command capabilities/parameters that are advertised next to the command
    in the capabilities data structure describing the server. The callable
    receives as arguments the repository and protocol objects. It returns
    a dict of extra fields to add to the command descriptor.

    Wire protocol commands are generators of objects to be serialized and
    sent to the client.

    If a command raises an uncaught exception, this will be translated into
    a command error.

    All commands can opt in to being cacheable by defining a function
    (``cachekeyfn``) that is called to derive a cache key. This function
    receives the same arguments as the command itself plus a ``cacher``
    argument containing the active cacher for the request, and returns bytes
    containing the cache key under which the response to this command may be
    cached.
    """
    transports = {
        k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 2
    }

    if permission not in (b'push', b'pull'):
        raise error.ProgrammingError(
            b'invalid wire protocol permission; '
            b'got %s; expected "push" or "pull"' % permission
        )

    if args is None:
        args = {}

    if not isinstance(args, dict):
        raise error.ProgrammingError(
            b'arguments for version 2 commands must be declared as dicts'
        )

    for arg, meta in args.items():
        if arg == b'*':
            raise error.ProgrammingError(
                b'* argument name not allowed on version 2 commands'
            )

        if not isinstance(meta, dict):
            raise error.ProgrammingError(
                b'arguments for version 2 commands '
                b'must declare metadata as a dict'
            )

        if b'type' not in meta:
            raise error.ProgrammingError(
                b'%s argument for command %s does not '
                b'declare type field' % (arg, name)
            )

        if meta[b'type'] not in (
            b'bytes',
            b'int',
            b'list',
            b'dict',
            b'set',
            b'bool',
        ):
            raise error.ProgrammingError(
                b'%s argument for command %s has '
                b'illegal type: %s' % (arg, name, meta[b'type'])
            )

        if b'example' not in meta:
            raise error.ProgrammingError(
                b'%s argument for command %s does not '
                b'declare example field' % (arg, name)
            )

        meta[b'required'] = b'default' not in meta

        meta.setdefault(b'default', lambda: None)
        meta.setdefault(b'validvalues', None)

    def register(func):
        if name in COMMANDS:
            raise error.ProgrammingError(
                b'%s command already registered for version 2' % name
            )

        COMMANDS[name] = wireprototypes.commandentry(
            func,
            args=args,
            transports=transports,
            permission=permission,
            cachekeyfn=cachekeyfn,
            extracapabilitiesfn=extracapabilitiesfn,
        )

        return func

    return register

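
# Illustrative usage of the decorator above. The command name, argument, and
# body are invented for demonstration. The registration is wrapped in a helper
# that is never called, so nothing is actually added to COMMANDS at import
# time.
def _example_command_registration():
    @wireprotocommand(
        b'exampleheads',
        args={
            b'publiconly': {
                b'type': b'bool',
                b'default': lambda: False,
                b'example': True,
            },
        },
        permission=b'pull',
    )
    def exampleheads(repo, proto, publiconly):
        # Commands are generators of CBOR-encodable objects.
        heads = repo.heads()
        if publiconly:
            heads = [n for n in heads if repo[n].phasestr() == b'public']
        yield {b'heads': heads}

    return exampleheads
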
def makecommandcachekeyfn(command, localversion=None, allargs=False):
    """Construct a cache key derivation function with common features.

    By default, the cache key is a hash of:

    * The command name.
    * A global cache version number.
    * A local cache version number (passed via ``localversion``).
    * All the arguments passed to the command.
    * The media type used.
    * Wire protocol version string.
    * The repository path.
    """
    if not allargs:
        raise error.ProgrammingError(
            b'only allargs=True is currently supported'
        )

    if localversion is None:
        raise error.ProgrammingError(b'must set localversion argument value')

    def cachekeyfn(repo, proto, cacher, **args):
        spec = COMMANDS[command]

        # Commands that mutate the repo can not be cached.
        if spec.permission == b'push':
            return None

        # TODO config option to disable caching.

        # Our key derivation strategy is to construct a data structure
        # holding everything that could influence cacheability and to hash
        # the CBOR representation of that. Using CBOR seems like it might
        # be overkill. However, simpler hashing mechanisms are prone to
        # duplicate input issues. e.g. if you just concatenate two values,
        # "foo"+"bar" is identical to "fo"+"obar". Using CBOR provides
        # "padding" between values and prevents these problems.

        # Seed the hash with various data.
        state = {
            # To invalidate all cache keys.
            b'globalversion': GLOBAL_CACHE_VERSION,
            # More granular cache key invalidation.
            b'localversion': localversion,
            # Cache keys are segmented by command.
            b'command': command,
            # Throw in the media type and API version strings so changes
            # to exchange semantics invalidate the cache.
            b'mediatype': FRAMINGTYPE,
            b'version': HTTP_WIREPROTO_V2,
            # So same requests for different repos don't share cache keys.
            b'repo': repo.root,
        }

        # The arguments passed to us will have already been normalized.
        # Default values will be set, etc. This is important because it
        # means that it doesn't matter if clients send an explicit argument
        # or rely on the default value: it will all normalize to the same
        # set of arguments on the server and therefore the same cache key.
        #
        # Arguments by their very nature must support being encoded to CBOR.
        # And the CBOR encoder is deterministic. So we hash the arguments
        # by feeding the CBOR of their representation into the hasher.
        if allargs:
            state[b'args'] = pycompat.byteskwargs(args)

        cacher.adjustcachekeystate(state)

        hasher = hashutil.sha1()
        for chunk in cborutil.streamencode(state):
            hasher.update(chunk)

        return pycompat.sysbytes(hasher.hexdigest())

    return cachekeyfn

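
# A minimal, standard-library-only sketch of the collision problem that the
# CBOR-based key derivation above avoids: with naive concatenation the hasher
# cannot tell where one value ends and the next begins, while length-prefixed
# framing (which CBOR provides for free) removes the ambiguity. Not part of
# the original module; never called.
def _demo_concatenation_ambiguity():
    import hashlib

    # Different logical inputs, identical digest: ambiguous.
    assert (
        hashlib.sha1(b'foo' + b'bar').hexdigest()
        == hashlib.sha1(b'fo' + b'obar').hexdigest()
    )

    # With a length prefix per value, the two inputs no longer collide.
    assert (
        hashlib.sha1(b'3:foo3:bar').hexdigest()
        != hashlib.sha1(b'2:fo4:obar').hexdigest()
    )
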
def makeresponsecacher(
    repo, proto, command, args, objencoderfn, redirecttargets, redirecthashes
):
    """Construct a cacher for a cacheable command.

    Returns an ``iwireprotocolcommandcacher`` instance.

    Extensions can monkeypatch this function to provide custom caching
    backends.
    """
    return None

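
# A rough, hypothetical sketch of the kind of object an extension could return
# after monkeypatching makeresponsecacher() above. It only implements the
# methods the dispatch code earlier in this module actually calls (context
# manager protocol, adjustcachekeystate, setcachekey, lookup, onobject,
# onfinished); a real implementation should conform to the full
# ``iwireprotocolcommandcacher`` interface. Nothing in this module
# instantiates it.
class _examplememorycacher(object):
    # Process-local cache shared by all instances: {cachekey: [objects]}.
    _cache = {}

    def __init__(self):
        self._key = None
        self._buffered = []

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        return False

    def adjustcachekeystate(self, state):
        # Nothing beyond the default state influences our cache keys.
        pass

    def setcachekey(self, key):
        self._key = key
        return True

    def lookup(self):
        objs = self._cache.get(self._key)
        return {b'objs': objs} if objs is not None else None

    def onobject(self, obj):
        # Buffer the object for storage and pass it through to the client.
        self._buffered.append(obj)
        yield obj

    def onfinished(self):
        self._cache[self._key] = self._buffered
        return []
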
def resolvenodes(repo, revisions):
    """Resolve nodes from a revisions specifier data structure."""
    cl = repo.changelog
    clhasnode = cl.hasnode

    seen = set()
    nodes = []

    if not isinstance(revisions, list):
        raise error.WireprotoCommandError(
            b'revisions must be defined as an array'
        )

    for spec in revisions:
        if b'type' not in spec:
            raise error.WireprotoCommandError(
                b'type key not present in revision specifier'
            )

        typ = spec[b'type']

        if typ == b'changesetexplicit':
            if b'nodes' not in spec:
                raise error.WireprotoCommandError(
                    b'nodes key not present in changesetexplicit revision '
                    b'specifier'
                )

            for node in spec[b'nodes']:
                if node not in seen:
                    nodes.append(node)
                    seen.add(node)

        elif typ == b'changesetexplicitdepth':
            for key in (b'nodes', b'depth'):
                if key not in spec:
                    raise error.WireprotoCommandError(
                        b'%s key not present in changesetexplicitdepth revision '
                        b'specifier',
                        (key,),
                    )

            for rev in repo.revs(
                b'ancestors(%ln, %s)', spec[b'nodes'], spec[b'depth'] - 1
            ):
                node = cl.node(rev)

                if node not in seen:
                    nodes.append(node)
                    seen.add(node)

        elif typ == b'changesetdagrange':
            for key in (b'roots', b'heads'):
                if key not in spec:
                    raise error.WireprotoCommandError(
                        b'%s key not present in changesetdagrange revision '
                        b'specifier',
                        (key,),
                    )

            if not spec[b'heads']:
                raise error.WireprotoCommandError(
                    b'heads key in changesetdagrange cannot be empty'
                )

            if spec[b'roots']:
                common = [n for n in spec[b'roots'] if clhasnode(n)]
            else:
                common = [nullid]

            for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
                if n not in seen:
                    nodes.append(n)
                    seen.add(n)

        else:
            raise error.WireprotoCommandError(
                b'unknown revision specifier type: %s', (typ,)
            )

    return nodes

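
# For reference, illustrative payloads for the three specifier types handled
# by resolvenodes() above (node values abbreviated; on the wire they are
# binary node ids):
#
#   {b'type': b'changesetexplicit',
#    b'nodes': [b'<node1>', b'<node2>']}
#
#   {b'type': b'changesetexplicitdepth',
#    b'nodes': [b'<node>'],
#    b'depth': 2}                       # the node plus 1 generation of ancestors
#
#   {b'type': b'changesetdagrange',
#    b'roots': [b'<common node>'],      # may be empty, meaning "from null"
#    b'heads': [b'<head node>']}        # must not be empty
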
@wireprotocommand(b'branchmap', permission=b'pull')
def branchmapv2(repo, proto):
    yield {
        encoding.fromlocal(k): v
        for k, v in pycompat.iteritems(repo.branchmap())
    }


@wireprotocommand(b'capabilities', permission=b'pull')
def capabilitiesv2(repo, proto):
    yield _capabilitiesv2(repo, proto)


@wireprotocommand(
    b'changesetdata',
    args={
        b'revisions': {
            b'type': b'list',
            b'example': [
                {
                    b'type': b'changesetexplicit',
                    b'nodes': [b'abcdef...'],
                }
            ],
        },
        b'fields': {
            b'type': b'set',
            b'default': set,
            b'example': {b'parents', b'revision'},
            b'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'},
        },
    },
    permission=b'pull',
)
def changesetdata(repo, proto, revisions, fields):
    # TODO look for unknown fields and abort when they can't be serviced.
    # This could probably be validated by dispatcher using validvalues.

    cl = repo.changelog
    outgoing = resolvenodes(repo, revisions)
    publishing = repo.publishing()

    if outgoing:
        repo.hook(b'preoutgoing', throw=True, source=b'serve')

    yield {
        b'totalitems': len(outgoing),
    }

    # The phases of nodes already transferred to the client may have changed
    # since the client last requested data. We send phase-only records
    # for these revisions, if requested.
    # TODO actually do this. We'll probably want to emit phase heads
    # in the ancestry set of the outgoing revisions. This will ensure
    # that phase updates within that set are seen.
    if b'phase' in fields:
        pass

    nodebookmarks = {}
    for mark, node in repo._bookmarks.items():
        nodebookmarks.setdefault(node, set()).add(mark)

    # It is already topologically sorted by revision number.
    for node in outgoing:
        d = {
            b'node': node,
        }

        if b'parents' in fields:
            d[b'parents'] = cl.parents(node)

        if b'phase' in fields:
            if publishing:
                d[b'phase'] = b'public'
            else:
                ctx = repo[node]
                d[b'phase'] = ctx.phasestr()

        if b'bookmarks' in fields and node in nodebookmarks:
            d[b'bookmarks'] = sorted(nodebookmarks[node])
            del nodebookmarks[node]

        followingmeta = []
        followingdata = []

        if b'revision' in fields:
            revisiondata = cl.revision(node)
            followingmeta.append((b'revision', len(revisiondata)))
            followingdata.append(revisiondata)

        # TODO make it possible for extensions to wrap a function or register
        # a handler to service custom fields.

        if followingmeta:
            d[b'fieldsfollowing'] = followingmeta

        yield d

        for extra in followingdata:
            yield extra

    # If requested, send bookmarks from nodes that didn't have revision
    # data sent so receiver is aware of any bookmark updates.
    if b'bookmarks' in fields:
        for node, marks in sorted(pycompat.iteritems(nodebookmarks)):
            yield {
                b'node': node,
                b'bookmarks': sorted(marks),
            }

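
# For orientation, a sketch of the object stream changesetdata() above emits
# for a request with fields={b'parents', b'revision'} (values illustrative):
#
#   {b'totalitems': 2}
#   {b'node': b'<node1>',
#    b'parents': [b'<p1>', b'<p2>'],
#    b'fieldsfollowing': [(b'revision', 61)]}
#   b'<61 bytes of raw changelog revision data>'
#   {b'node': b'<node2>',
#    b'parents': [b'<p1>', b'<p2>'],
#    b'fieldsfollowing': [(b'revision', 73)]}
#   b'<73 bytes of raw changelog revision data>'
#
# Bookmark-only records ({b'node': ..., b'bookmarks': [...]}) follow at the
# end when b'bookmarks' is requested and a bookmark points at a node whose
# revision data was not sent.
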
class FileAccessError(Exception):
    """Represents an error accessing a specific file."""

    def __init__(self, path, msg, args):
        self.path = path
        self.msg = msg
        self.args = args


def getfilestore(repo, proto, path):
    """Obtain a file storage object for use with wire protocol.

    Exists as a standalone function so extensions can monkeypatch to add
    access control.
    """
    # This seems to work even if the file doesn't exist. So catch
    # "empty" files and return an error.
    fl = repo.file(path)

    if not len(fl):
        raise FileAccessError(path, b'unknown file: %s', (path,))

    return fl


def emitfilerevisions(repo, path, revisions, linknodes, fields):
    for revision in revisions:
        d = {
            b'node': revision.node,
        }

        if b'parents' in fields:
            d[b'parents'] = [revision.p1node, revision.p2node]

        if b'linknode' in fields:
            d[b'linknode'] = linknodes[revision.node]

        followingmeta = []
        followingdata = []

        if b'revision' in fields:
            if revision.revision is not None:
                followingmeta.append((b'revision', len(revision.revision)))
                followingdata.append(revision.revision)
            else:
                d[b'deltabasenode'] = revision.basenode
                followingmeta.append((b'delta', len(revision.delta)))
                followingdata.append(revision.delta)

        if followingmeta:
            d[b'fieldsfollowing'] = followingmeta

        yield d

        for extra in followingdata:
            yield extra

def makefilematcher(repo, pathfilter):
    """Construct a matcher from a path filter dict."""

    # Validate values.
    if pathfilter:
        for key in (b'include', b'exclude'):
            for pattern in pathfilter.get(key, []):
                if not pattern.startswith((b'path:', b'rootfilesin:')):
                    raise error.WireprotoCommandError(
                        b'%s pattern must begin with `path:` or `rootfilesin:`; '
                        b'got %s',
                        (key, pattern),
                    )

    if pathfilter:
        matcher = matchmod.match(
            repo.root,
            b'',
            include=pathfilter.get(b'include', []),
            exclude=pathfilter.get(b'exclude', []),
        )
    else:
        matcher = matchmod.match(repo.root, b'')

    # Requested patterns could include files not in the local store. So
    # filter those out.
    return repo.narrowmatch(matcher)

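
# Illustrative only: how a client-supplied ``pathfilter`` dict maps onto a
# matcher via makefilematcher() above. The paths are invented and the helper
# is never called by this module.
def _demo_makefilematcher(repo):
    pathfilter = {
        b'include': [b'path:tests', b'rootfilesin:docs'],
        b'exclude': [b'path:tests/fixtures'],
    }
    matcher = makefilematcher(repo, pathfilter)
    # True for paths under tests/ (except tests/fixtures/) and files directly
    # in docs/, provided the repo's narrow spec also includes them.
    return matcher(b'tests/test-example.py')
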
@wireprotocommand(
    b'filedata',
    args={
        b'haveparents': {
            b'type': b'bool',
            b'default': lambda: False,
            b'example': True,
        },
        b'nodes': {
            b'type': b'list',
            b'example': [b'0123456...'],
        },
        b'fields': {
            b'type': b'set',
            b'default': set,
            b'example': {b'parents', b'revision'},
            b'validvalues': {b'parents', b'revision', b'linknode'},
        },
        b'path': {
            b'type': b'bytes',
            b'example': b'foo.txt',
        },
    },
    permission=b'pull',
    # TODO censoring a file revision won't invalidate the cache.
    # Figure out a way to take censoring into account when deriving
    # the cache key.
    cachekeyfn=makecommandcachekeyfn(b'filedata', 1, allargs=True),
)
def filedata(repo, proto, haveparents, nodes, fields, path):
    # TODO this API allows access to file revisions that are attached to
    # secret changesets. filesdata does not have this problem. Maybe this
    # API should be deleted?

    try:
        # Extensions may wish to access the protocol handler.
        store = getfilestore(repo, proto, path)
    except FileAccessError as e:
        raise error.WireprotoCommandError(e.msg, e.args)

    clnode = repo.changelog.node
    linknodes = {}

    # Validate requested nodes.
    for node in nodes:
        try:
            store.rev(node)
        except error.LookupError:
            raise error.WireprotoCommandError(
                b'unknown file node: %s', (hex(node),)
            )

        # TODO by creating the filectx against a specific file revision
        # instead of changeset, linkrev() is always used. This is wrong for
        # cases where linkrev() may refer to a hidden changeset. But since this
        # API doesn't know anything about changesets, we're not sure how to
        # disambiguate the linknode. Perhaps we should delete this API?
        fctx = repo.filectx(path, fileid=node)
        linknodes[node] = clnode(fctx.introrev())

    revisions = store.emitrevisions(
        nodes,
        revisiondata=b'revision' in fields,
        assumehaveparentrevisions=haveparents,
    )

    yield {
        b'totalitems': len(nodes),
    }

    for o in emitfilerevisions(repo, path, revisions, linknodes, fields):
        yield o


def filesdatacapabilities(repo, proto):
    batchsize = repo.ui.configint(
        b'experimental', b'server.filesdata.recommended-batch-size'
    )
    return {
        b'recommendedbatchsize': batchsize,
    }

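
# The recommended batch size advertised above comes from a config knob; for
# example, in hgrc (the value here is arbitrary, not a documented default):
#
#   [experimental]
#   server.filesdata.recommended-batch-size = 10000
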
@wireprotocommand(
    b'filesdata',
    args={
        b'haveparents': {
            b'type': b'bool',
            b'default': lambda: False,
            b'example': True,
        },
        b'fields': {
            b'type': b'set',
            b'default': set,
            b'example': {b'parents', b'revision'},
            b'validvalues': {
                b'firstchangeset',
                b'linknode',
                b'parents',
                b'revision',
            },
        },
        b'pathfilter': {
            b'type': b'dict',
            b'default': lambda: None,
            b'example': {b'include': [b'path:tests']},
        },
        b'revisions': {
            b'type': b'list',
            b'example': [
                {
                    b'type': b'changesetexplicit',
                    b'nodes': [b'abcdef...'],
                }
            ],
        },
    },
    permission=b'pull',
    # TODO censoring a file revision won't invalidate the cache.
    # Figure out a way to take censoring into account when deriving
    # the cache key.
    cachekeyfn=makecommandcachekeyfn(b'filesdata', 1, allargs=True),
    extracapabilitiesfn=filesdatacapabilities,
)
def filesdata(repo, proto, haveparents, fields, pathfilter, revisions):
    # TODO This should operate on a repo that exposes obsolete changesets. There
    # is a race between a client making a push that obsoletes a changeset and
    # another client fetching files data for that changeset. If a client has a
    # changeset, it should probably be allowed to access files data for that
    # changeset.

    outgoing = resolvenodes(repo, revisions)
    filematcher = makefilematcher(repo, pathfilter)

    # path -> {fnode: linknode}
    fnodes = collections.defaultdict(dict)

    # We collect the set of relevant file revisions by iterating the changeset
    # revisions and either walking the set of files recorded in the changeset
    # or by walking the manifest at that revision. There is probably room for a
    # storage-level API to request this data, as it can be expensive to compute
    # and would benefit from caching or alternate storage from what revlogs
    # provide.
    for node in outgoing:
        ctx = repo[node]
        mctx = ctx.manifestctx()
        md = mctx.read()

        if haveparents:
            checkpaths = ctx.files()
        else:
            checkpaths = md.keys()

        for path in checkpaths:
            fnode = md[path]

            if path in fnodes and fnode in fnodes[path]:
                continue

            if not filematcher(path):
                continue

            fnodes[path].setdefault(fnode, node)

    yield {
        b'totalpaths': len(fnodes),
        b'totalitems': sum(len(v) for v in fnodes.values()),
1333 | } |
|
1333 | } | |
1334 |
|
1334 | |||
1335 | for path, filenodes in sorted(fnodes.items()): |
|
1335 | for path, filenodes in sorted(fnodes.items()): | |
1336 | try: |
|
1336 | try: | |
1337 | store = getfilestore(repo, proto, path) |
|
1337 | store = getfilestore(repo, proto, path) | |
1338 | except FileAccessError as e: |
|
1338 | except FileAccessError as e: | |
1339 | raise error.WireprotoCommandError(e.msg, e.args) |
|
1339 | raise error.WireprotoCommandError(e.msg, e.args) | |
1340 |
|
1340 | |||
1341 | yield { |
|
1341 | yield { | |
1342 | b'path': path, |
|
1342 | b'path': path, | |
1343 | b'totalitems': len(filenodes), |
|
1343 | b'totalitems': len(filenodes), | |
1344 | } |
|
1344 | } | |
1345 |
|
1345 | |||
1346 | revisions = store.emitrevisions( |
|
1346 | revisions = store.emitrevisions( | |
1347 | filenodes.keys(), |
|
1347 | filenodes.keys(), | |
1348 | revisiondata=b'revision' in fields, |
|
1348 | revisiondata=b'revision' in fields, | |
1349 | assumehaveparentrevisions=haveparents, |
|
1349 | assumehaveparentrevisions=haveparents, | |
1350 | ) |
|
1350 | ) | |
1351 |
|
1351 | |||
1352 | for o in emitfilerevisions(repo, path, revisions, filenodes, fields): |
|
1352 | for o in emitfilerevisions(repo, path, revisions, filenodes, fields): | |
1353 | yield o |
|
1353 | yield o | |
1354 |
|
1354 | |||
1355 |
|
1355 | |||
1356 | @wireprotocommand( |
|
1356 | @wireprotocommand( | |
1357 | b'heads', |
|
1357 | b'heads', | |
1358 | args={ |
|
1358 | args={ | |
1359 | b'publiconly': { |
|
1359 | b'publiconly': { | |
1360 | b'type': b'bool', |
|
1360 | b'type': b'bool', | |
1361 | b'default': lambda: False, |
|
1361 | b'default': lambda: False, | |
1362 | b'example': False, |
|
1362 | b'example': False, | |
1363 | }, |
|
1363 | }, | |
1364 | }, |
|
1364 | }, | |
1365 | permission=b'pull', |
|
1365 | permission=b'pull', | |
1366 | ) |
|
1366 | ) | |
1367 | def headsv2(repo, proto, publiconly): |
|
1367 | def headsv2(repo, proto, publiconly): | |
1368 | if publiconly: |
|
1368 | if publiconly: | |
1369 | repo = repo.filtered(b'immutable') |
|
1369 | repo = repo.filtered(b'immutable') | |
1370 |
|
1370 | |||
1371 | yield repo.heads() |
|
1371 | yield repo.heads() | |
1372 |
|
1372 | |||
1373 |
|
1373 | |||
1374 | @wireprotocommand( |
|
1374 | @wireprotocommand( | |
1375 | b'known', |
|
1375 | b'known', | |
1376 | args={ |
|
1376 | args={ | |
1377 | b'nodes': { |
|
1377 | b'nodes': { | |
1378 | b'type': b'list', |
|
1378 | b'type': b'list', | |
1379 | b'default': list, |
|
1379 | b'default': list, | |
1380 | b'example': [b'deadbeef'], |
|
1380 | b'example': [b'deadbeef'], | |
1381 | }, |
|
1381 | }, | |
1382 | }, |
|
1382 | }, | |
1383 | permission=b'pull', |
|
1383 | permission=b'pull', | |
1384 | ) |
|
1384 | ) | |
1385 | def knownv2(repo, proto, nodes): |
|
1385 | def knownv2(repo, proto, nodes): | |
1386 | result = b''.join(b'1' if n else b'0' for n in repo.known(nodes)) |
|
1386 | result = b''.join(b'1' if n else b'0' for n in repo.known(nodes)) | |
1387 | yield result |
|
1387 | yield result | |
1388 |
|
1388 | |||
1389 |
|
1389 | |||
1390 | @wireprotocommand( |
|
1390 | @wireprotocommand( | |
1391 | b'listkeys', |
|
1391 | b'listkeys', | |
1392 | args={ |
|
1392 | args={ | |
1393 | b'namespace': { |
|
1393 | b'namespace': { | |
1394 | b'type': b'bytes', |
|
1394 | b'type': b'bytes', | |
1395 | b'example': b'ns', |
|
1395 | b'example': b'ns', | |
1396 | }, |
|
1396 | }, | |
1397 | }, |
|
1397 | }, | |
1398 | permission=b'pull', |
|
1398 | permission=b'pull', | |
1399 | ) |
|
1399 | ) | |
1400 | def listkeysv2(repo, proto, namespace): |
|
1400 | def listkeysv2(repo, proto, namespace): | |
1401 | keys = repo.listkeys(encoding.tolocal(namespace)) |
|
1401 | keys = repo.listkeys(encoding.tolocal(namespace)) | |
1402 | keys = { |
|
1402 | keys = { | |
1403 | encoding.fromlocal(k): encoding.fromlocal(v) |
|
1403 | encoding.fromlocal(k): encoding.fromlocal(v) | |
1404 | for k, v in pycompat.iteritems(keys) |
|
1404 | for k, v in pycompat.iteritems(keys) | |
1405 | } |
|
1405 | } | |
1406 |
|
1406 | |||
1407 | yield keys |
|
1407 | yield keys | |
1408 |
|
1408 | |||
1409 |
|
1409 | |||
1410 | @wireprotocommand( |
|
1410 | @wireprotocommand( | |
1411 | b'lookup', |
|
1411 | b'lookup', | |
1412 | args={ |
|
1412 | args={ | |
1413 | b'key': { |
|
1413 | b'key': { | |
1414 | b'type': b'bytes', |
|
1414 | b'type': b'bytes', | |
1415 | b'example': b'foo', |
|
1415 | b'example': b'foo', | |
1416 | }, |
|
1416 | }, | |
1417 | }, |
|
1417 | }, | |
1418 | permission=b'pull', |
|
1418 | permission=b'pull', | |
1419 | ) |
|
1419 | ) | |
1420 | def lookupv2(repo, proto, key): |
|
1420 | def lookupv2(repo, proto, key): | |
1421 | key = encoding.tolocal(key) |
|
1421 | key = encoding.tolocal(key) | |
1422 |
|
1422 | |||
1423 | # TODO handle exception. |
|
1423 | # TODO handle exception. | |
1424 | node = repo.lookup(key) |
|
1424 | node = repo.lookup(key) | |
1425 |
|
1425 | |||
1426 | yield node |
|
1426 | yield node | |
1427 |
|
1427 | |||
1428 |
|
1428 | |||
1429 | def manifestdatacapabilities(repo, proto): |
|
1429 | def manifestdatacapabilities(repo, proto): | |
1430 | batchsize = repo.ui.configint( |
|
1430 | batchsize = repo.ui.configint( | |
1431 | b'experimental', b'server.manifestdata.recommended-batch-size' |
|
1431 | b'experimental', b'server.manifestdata.recommended-batch-size' | |
1432 | ) |
|
1432 | ) | |
1433 |
|
1433 | |||
1434 | return { |
|
1434 | return { | |
1435 | b'recommendedbatchsize': batchsize, |
|
1435 | b'recommendedbatchsize': batchsize, | |
1436 | } |
|
1436 | } | |
1437 |
|
1437 | |||
1438 |
|
1438 | |||
1439 | @wireprotocommand( |
|
1439 | @wireprotocommand( | |
1440 | b'manifestdata', |
|
1440 | b'manifestdata', | |
1441 | args={ |
|
1441 | args={ | |
1442 | b'nodes': { |
|
1442 | b'nodes': { | |
1443 | b'type': b'list', |
|
1443 | b'type': b'list', | |
1444 | b'example': [b'0123456...'], |
|
1444 | b'example': [b'0123456...'], | |
1445 | }, |
|
1445 | }, | |
1446 | b'haveparents': { |
|
1446 | b'haveparents': { | |
1447 | b'type': b'bool', |
|
1447 | b'type': b'bool', | |
1448 | b'default': lambda: False, |
|
1448 | b'default': lambda: False, | |
1449 | b'example': True, |
|
1449 | b'example': True, | |
1450 | }, |
|
1450 | }, | |
1451 | b'fields': { |
|
1451 | b'fields': { | |
1452 | b'type': b'set', |
|
1452 | b'type': b'set', | |
1453 | b'default': set, |
|
1453 | b'default': set, | |
1454 | b'example': {b'parents', b'revision'}, |
|
1454 | b'example': {b'parents', b'revision'}, | |
1455 | b'validvalues': {b'parents', b'revision'}, |
|
1455 | b'validvalues': {b'parents', b'revision'}, | |
1456 | }, |
|
1456 | }, | |
1457 | b'tree': { |
|
1457 | b'tree': { | |
1458 | b'type': b'bytes', |
|
1458 | b'type': b'bytes', | |
1459 | b'example': b'', |
|
1459 | b'example': b'', | |
1460 | }, |
|
1460 | }, | |
1461 | }, |
|
1461 | }, | |
1462 | permission=b'pull', |
|
1462 | permission=b'pull', | |
1463 | cachekeyfn=makecommandcachekeyfn(b'manifestdata', 1, allargs=True), |
|
1463 | cachekeyfn=makecommandcachekeyfn(b'manifestdata', 1, allargs=True), | |
1464 | extracapabilitiesfn=manifestdatacapabilities, |
|
1464 | extracapabilitiesfn=manifestdatacapabilities, | |
1465 | ) |
|
1465 | ) | |
1466 | def manifestdata(repo, proto, haveparents, nodes, fields, tree): |
|
1466 | def manifestdata(repo, proto, haveparents, nodes, fields, tree): | |
1467 | store = repo.manifestlog.getstorage(tree) |
|
1467 | store = repo.manifestlog.getstorage(tree) | |
1468 |
|
1468 | |||
1469 | # Validate the node is known and abort on unknown revisions. |
|
1469 | # Validate the node is known and abort on unknown revisions. | |
1470 | for node in nodes: |
|
1470 | for node in nodes: | |
1471 | try: |
|
1471 | try: | |
1472 | store.rev(node) |
|
1472 | store.rev(node) | |
1473 | except error.LookupError: |
|
1473 | except error.LookupError: | |
1474 | raise error.WireprotoCommandError(b'unknown node: %s', (node,)) |
|
1474 | raise error.WireprotoCommandError(b'unknown node: %s', (node,)) | |
1475 |
|
1475 | |||
1476 | revisions = store.emitrevisions( |
|
1476 | revisions = store.emitrevisions( | |
1477 | nodes, |
|
1477 | nodes, | |
1478 | revisiondata=b'revision' in fields, |
|
1478 | revisiondata=b'revision' in fields, | |
1479 | assumehaveparentrevisions=haveparents, |
|
1479 | assumehaveparentrevisions=haveparents, | |
1480 | ) |
|
1480 | ) | |
1481 |
|
1481 | |||
1482 | yield { |
|
1482 | yield { | |
1483 | b'totalitems': len(nodes), |
|
1483 | b'totalitems': len(nodes), | |
1484 | } |
|
1484 | } | |
1485 |
|
1485 | |||
1486 | for revision in revisions: |
|
1486 | for revision in revisions: | |
1487 | d = { |
|
1487 | d = { | |
1488 | b'node': revision.node, |
|
1488 | b'node': revision.node, | |
1489 | } |
|
1489 | } | |
1490 |
|
1490 | |||
1491 | if b'parents' in fields: |
|
1491 | if b'parents' in fields: | |
1492 | d[b'parents'] = [revision.p1node, revision.p2node] |
|
1492 | d[b'parents'] = [revision.p1node, revision.p2node] | |
1493 |
|
1493 | |||
1494 | followingmeta = [] |
|
1494 | followingmeta = [] | |
1495 | followingdata = [] |
|
1495 | followingdata = [] | |
1496 |
|
1496 | |||
1497 | if b'revision' in fields: |
|
1497 | if b'revision' in fields: | |
1498 | if revision.revision is not None: |
|
1498 | if revision.revision is not None: | |
1499 | followingmeta.append((b'revision', len(revision.revision))) |
|
1499 | followingmeta.append((b'revision', len(revision.revision))) | |
1500 | followingdata.append(revision.revision) |
|
1500 | followingdata.append(revision.revision) | |
1501 | else: |
|
1501 | else: | |
1502 | d[b'deltabasenode'] = revision.basenode |
|
1502 | d[b'deltabasenode'] = revision.basenode | |
1503 | followingmeta.append((b'delta', len(revision.delta))) |
|
1503 | followingmeta.append((b'delta', len(revision.delta))) | |
1504 | followingdata.append(revision.delta) |
|
1504 | followingdata.append(revision.delta) | |
1505 |
|
1505 | |||
1506 | if followingmeta: |
|
1506 | if followingmeta: | |
1507 | d[b'fieldsfollowing'] = followingmeta |
|
1507 | d[b'fieldsfollowing'] = followingmeta | |
1508 |
|
1508 | |||
1509 | yield d |
|
1509 | yield d | |
1510 |
|
1510 | |||
1511 | for extra in followingdata: |
|
1511 | for extra in followingdata: | |
1512 | yield extra |
|
1512 | yield extra | |
1513 |
|
1513 | |||
1514 |
|
1514 | |||
1515 | @wireprotocommand( |
|
1515 | @wireprotocommand( | |
1516 | b'pushkey', |
|
1516 | b'pushkey', | |
1517 | args={ |
|
1517 | args={ | |
1518 | b'namespace': { |
|
1518 | b'namespace': { | |
1519 | b'type': b'bytes', |
|
1519 | b'type': b'bytes', | |
1520 | b'example': b'ns', |
|
1520 | b'example': b'ns', | |
1521 | }, |
|
1521 | }, | |
1522 | b'key': { |
|
1522 | b'key': { | |
1523 | b'type': b'bytes', |
|
1523 | b'type': b'bytes', | |
1524 | b'example': b'key', |
|
1524 | b'example': b'key', | |
1525 | }, |
|
1525 | }, | |
1526 | b'old': { |
|
1526 | b'old': { | |
1527 | b'type': b'bytes', |
|
1527 | b'type': b'bytes', | |
1528 | b'example': b'old', |
|
1528 | b'example': b'old', | |
1529 | }, |
|
1529 | }, | |
1530 | b'new': { |
|
1530 | b'new': { | |
1531 | b'type': b'bytes', |
|
1531 | b'type': b'bytes', | |
1532 | b'example': b'new', |
|
1532 | b'example': b'new', | |
1533 | }, |
|
1533 | }, | |
1534 | }, |
|
1534 | }, | |
1535 | permission=b'push', |
|
1535 | permission=b'push', | |
1536 | ) |
|
1536 | ) | |
1537 | def pushkeyv2(repo, proto, namespace, key, old, new): |
|
1537 | def pushkeyv2(repo, proto, namespace, key, old, new): | |
1538 | # TODO handle ui output redirection |
|
1538 | # TODO handle ui output redirection | |
1539 | yield repo.pushkey( |
|
1539 | yield repo.pushkey( | |
1540 | encoding.tolocal(namespace), |
|
1540 | encoding.tolocal(namespace), | |
1541 | encoding.tolocal(key), |
|
1541 | encoding.tolocal(key), | |
1542 | encoding.tolocal(old), |
|
1542 | encoding.tolocal(old), | |
1543 | encoding.tolocal(new), |
|
1543 | encoding.tolocal(new), | |
1544 | ) |
|
1544 | ) | |
1545 |
|
1545 | |||
1546 |
|
1546 | |||
1547 | @wireprotocommand( |
|
1547 | @wireprotocommand( | |
1548 | b'rawstorefiledata', |
|
1548 | b'rawstorefiledata', | |
1549 | args={ |
|
1549 | args={ | |
1550 | b'files': { |
|
1550 | b'files': { | |
1551 | b'type': b'list', |
|
1551 | b'type': b'list', | |
1552 | b'example': [b'changelog', b'manifestlog'], |
|
1552 | b'example': [b'changelog', b'manifestlog'], | |
1553 | }, |
|
1553 | }, | |
1554 | b'pathfilter': { |
|
1554 | b'pathfilter': { | |
1555 | b'type': b'list', |
|
1555 | b'type': b'list', | |
1556 | b'default': lambda: None, |
|
1556 | b'default': lambda: None, | |
1557 | b'example': {b'include': [b'path:tests']}, |
|
1557 | b'example': {b'include': [b'path:tests']}, | |
1558 | }, |
|
1558 | }, | |
1559 | }, |
|
1559 | }, | |
1560 | permission=b'pull', |
|
1560 | permission=b'pull', | |
1561 | ) |
|
1561 | ) | |
1562 | def rawstorefiledata(repo, proto, files, pathfilter): |
|
1562 | def rawstorefiledata(repo, proto, files, pathfilter): | |
1563 | if not streamclone.allowservergeneration(repo): |
|
1563 | if not streamclone.allowservergeneration(repo): | |
1564 | raise error.WireprotoCommandError(b'stream clone is disabled') |
|
1564 | raise error.WireprotoCommandError(b'stream clone is disabled') | |
1565 |
|
1565 | |||
1566 | # TODO support dynamically advertising what store files "sets" are |
|
1566 | # TODO support dynamically advertising what store files "sets" are | |
1567 | # available. For now, we support changelog, manifestlog, and files. |
|
1567 | # available. For now, we support changelog, manifestlog, and files. | |
1568 | files = set(files) |
|
1568 | files = set(files) | |
1569 | allowedfiles = {b'changelog', b'manifestlog'} |
|
1569 | allowedfiles = {b'changelog', b'manifestlog'} | |
1570 |
|
1570 | |||
1571 | unsupported = files - allowedfiles |
|
1571 | unsupported = files - allowedfiles | |
1572 | if unsupported: |
|
1572 | if unsupported: | |
1573 | raise error.WireprotoCommandError( |
|
1573 | raise error.WireprotoCommandError( | |
1574 | b'unknown file type: %s', (b', '.join(sorted(unsupported)),) |
|
1574 | b'unknown file type: %s', (b', '.join(sorted(unsupported)),) | |
1575 | ) |
|
1575 | ) | |
1576 |
|
1576 | |||
1577 | with repo.lock(): |
|
1577 | with repo.lock(): | |
1578 | topfiles = list(repo.store.topfiles()) |
|
1578 | topfiles = list(repo.store.topfiles()) | |
1579 |
|
1579 | |||
1580 | sendfiles = [] |
|
1580 | sendfiles = [] | |
1581 | totalsize = 0 |
|
1581 | totalsize = 0 | |
1582 |
|
1582 | |||
1583 | # TODO this breaks storage layer interface abstractions because

1583 | # TODO this breaks storage layer interface abstractions because |
1584 | # it assumes revlogs. |
|
1584 | # it assumes revlogs. | |
1585 | for name, encodedname, size in topfiles: |
|
1585 | for rl_type, name, encodedname, size in topfiles: | |
|
1586 | # XXX use the `rl_type` for that | |||
1586 | if b'changelog' in files and name.startswith(b'00changelog'): |
|
1587 | if b'changelog' in files and name.startswith(b'00changelog'): | |
1587 | pass |
|
1588 | pass | |
1588 | elif b'manifestlog' in files and name.startswith(b'00manifest'): |
|
1589 | elif b'manifestlog' in files and name.startswith(b'00manifest'): | |
1589 | pass |
|
1590 | pass | |
1590 | else: |
|
1591 | else: | |
1591 | continue |
|
1592 | continue | |
1592 |
|
1593 | |||
1593 | sendfiles.append((b'store', name, size)) |
|
1594 | sendfiles.append((b'store', name, size)) | |
1594 | totalsize += size |
|
1595 | totalsize += size | |
1595 |
|
1596 | |||
1596 | yield { |
|
1597 | yield { | |
1597 | b'filecount': len(sendfiles), |
|
1598 | b'filecount': len(sendfiles), | |
1598 | b'totalsize': totalsize, |
|
1599 | b'totalsize': totalsize, | |
1599 | } |
|
1600 | } | |
1600 |
|
1601 | |||
1601 | for location, name, size in sendfiles: |
|
1602 | for location, name, size in sendfiles: | |
1602 | yield { |
|
1603 | yield { | |
1603 | b'location': location, |
|
1604 | b'location': location, | |
1604 | b'path': name, |
|
1605 | b'path': name, | |
1605 | b'size': size, |
|
1606 | b'size': size, | |
1606 | } |
|
1607 | } | |
1607 |
|
1608 | |||
1608 | # We have to use a closure for this to ensure the context manager is |
|
1609 | # We have to use a closure for this to ensure the context manager is | |
1609 | # closed only after sending the final chunk. |
|
1610 | # closed only after sending the final chunk. | |
1610 | def getfiledata(): |
|
1611 | def getfiledata(): | |
1611 | with repo.svfs(name, b'rb', auditpath=False) as fh: |
|
1612 | with repo.svfs(name, b'rb', auditpath=False) as fh: | |
1612 | for chunk in util.filechunkiter(fh, limit=size): |
|
1613 | for chunk in util.filechunkiter(fh, limit=size): | |
1613 | yield chunk |
|
1614 | yield chunk | |
1614 |
|
1615 | |||
1615 | yield wireprototypes.indefinitebytestringresponse(getfiledata()) |
|
1616 | yield wireprototypes.indefinitebytestringresponse(getfiledata()) |
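The `getfiledata` closure above relies on a property of Python generators: the
`with` block inside a generator is only exited once the generator is exhausted
(or explicitly closed), so the store file handle stays open while the chunks are
streamed and is released right after the last chunk is delivered. A minimal
standalone sketch of that pattern (hypothetical `stream_file` helper, not part of
the Mercurial API):

    def stream_file(path, chunk_size=65536):
        """Yield chunks of `path`; the file stays open across yields and is
        closed automatically once iteration finishes or the generator is
        closed."""
        with open(path, 'rb') as fh:
            while True:
                chunk = fh.read(chunk_size)
                if not chunk:
                    break
                yield chunk

    # Usage: the consumer drives the generator, so the file is closed only
    # after the final chunk has been consumed.
    # for chunk in stream_file('/tmp/example.bin'):
    #     handle(chunk)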
@@ -1,777 +1,777 | |||||
1 | =================================== |
|
1 | =================================== | |
2 | Test the persistent on-disk nodemap |
|
2 | Test the persistent on-disk nodemap | |
3 | =================================== |
|
3 | =================================== | |
4 |
|
4 | |||
5 |
|
5 | |||
6 | #if no-rust |
|
6 | #if no-rust | |
7 |
|
7 | |||
8 | $ cat << EOF >> $HGRCPATH |
|
8 | $ cat << EOF >> $HGRCPATH | |
9 | > [format] |
|
9 | > [format] | |
10 | > use-persistent-nodemap=yes |
|
10 | > use-persistent-nodemap=yes | |
11 | > [devel] |
|
11 | > [devel] | |
12 | > persistent-nodemap=yes |
|
12 | > persistent-nodemap=yes | |
13 | > EOF |
|
13 | > EOF | |
14 |
|
14 | |||
15 | #endif |
|
15 | #endif | |
16 |
|
16 | |||
17 | $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow |
|
17 | $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow | |
18 | $ cd test-repo |
|
18 | $ cd test-repo | |
19 |
|
19 | |||
20 | Check handling of the default slow-path value |
|
20 | Check handling of the default slow-path value | |
21 |
|
21 | |||
22 | #if no-pure no-rust |
|
22 | #if no-pure no-rust | |
23 |
|
23 | |||
24 | $ hg id |
|
24 | $ hg id | |
25 | abort: accessing `persistent-nodemap` repository without associated fast implementation. |
|
25 | abort: accessing `persistent-nodemap` repository without associated fast implementation. | |
26 | (check `hg help config.format.use-persistent-nodemap` for details) |
|
26 | (check `hg help config.format.use-persistent-nodemap` for details) | |
27 | [255] |
|
27 | [255] | |
28 |
|
28 | |||
29 | Unlock further checks (we are here to test the feature)

29 | Unlock further checks (we are here to test the feature) |
30 |
|
30 | |||
31 | $ cat << EOF >> $HGRCPATH |
|
31 | $ cat << EOF >> $HGRCPATH | |
32 | > [storage] |
|
32 | > [storage] | |
33 | > # to avoid spamming the test |
|
33 | > # to avoid spamming the test | |
34 | > revlog.persistent-nodemap.slow-path=allow |
|
34 | > revlog.persistent-nodemap.slow-path=allow | |
35 | > EOF |
|
35 | > EOF | |
36 |
|
36 | |||
37 | #endif |
|
37 | #endif | |
38 |
|
38 | |||
39 | #if rust |
|
39 | #if rust | |
40 |
|
40 | |||
41 | Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule: |
|
41 | Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule: | |
42 | in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t` |
|
42 | in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t` | |
43 | (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs` |
|
43 | (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs` | |
44 | incorrectly used `libc::c_int` (32 bits). |
|
44 | incorrectly used `libc::c_int` (32 bits). | |
45 | As a result, -1 passed from Rust for the null revision became 4294967295 in C. |
|
45 | As a result, -1 passed from Rust for the null revision became 4294967295 in C. | |
46 |
|
46 | |||
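For reference, a small arithmetic sketch (plain Python, not part of the test or
of Mercurial) of the truncation described above: the 32-bit two's-complement
pattern of -1, read back as a zero-extended wider integer, is 4294967295.

    # -1 stored in 32 bits is all ones; reinterpreted through the mismatched
    # declaration as an unsigned/zero-extended value it becomes 2**32 - 1.
    bits32 = (-1) & 0xFFFFFFFF
    assert bits32 == 4294967295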
47 | $ hg log -r 00000000 |
|
47 | $ hg log -r 00000000 | |
48 | changeset: -1:000000000000 |
|
48 | changeset: -1:000000000000 | |
49 | tag: tip |
|
49 | tag: tip | |
50 | user: |
|
50 | user: | |
51 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
51 | date: Thu Jan 01 00:00:00 1970 +0000 | |
52 |
|
52 | |||
53 |
|
53 | |||
54 | #endif |
|
54 | #endif | |
55 |
|
55 | |||
56 |
|
56 | |||
57 | $ hg debugformat |
|
57 | $ hg debugformat | |
58 | format-variant repo |
|
58 | format-variant repo | |
59 | fncache: yes |
|
59 | fncache: yes | |
60 | dotencode: yes |
|
60 | dotencode: yes | |
61 | generaldelta: yes |
|
61 | generaldelta: yes | |
62 | share-safe: no |
|
62 | share-safe: no | |
63 | sparserevlog: yes |
|
63 | sparserevlog: yes | |
64 | persistent-nodemap: yes |
|
64 | persistent-nodemap: yes | |
65 | copies-sdc: no |
|
65 | copies-sdc: no | |
66 | revlog-v2: no |
|
66 | revlog-v2: no | |
67 | plain-cl-delta: yes |
|
67 | plain-cl-delta: yes | |
68 | compression: zlib (no-zstd !) |
|
68 | compression: zlib (no-zstd !) | |
69 | compression: zstd (zstd !) |
|
69 | compression: zstd (zstd !) | |
70 | compression-level: default |
|
70 | compression-level: default | |
71 | $ hg debugbuilddag .+5000 --new-file |
|
71 | $ hg debugbuilddag .+5000 --new-file | |
72 |
|
72 | |||
73 | $ hg debugnodemap --metadata |
|
73 | $ hg debugnodemap --metadata | |
74 | uid: ???????????????? (glob) |
|
74 | uid: ???????????????? (glob) | |
75 | tip-rev: 5000 |
|
75 | tip-rev: 5000 | |
76 | tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c |
|
76 | tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c | |
77 | data-length: 121088 |
|
77 | data-length: 121088 | |
78 | data-unused: 0 |
|
78 | data-unused: 0 | |
79 | data-unused: 0.000% |
|
79 | data-unused: 0.000% | |
80 | $ f --size .hg/store/00changelog.n |
|
80 | $ f --size .hg/store/00changelog.n | |
81 | .hg/store/00changelog.n: size=70 |
|
81 | .hg/store/00changelog.n: size=70 | |
82 |
|
82 | |||
83 | Simple lookup works |
|
83 | Simple lookup works | |
84 |
|
84 | |||
85 | $ ANYNODE=`hg log --template '{node|short}\n' --rev tip` |
|
85 | $ ANYNODE=`hg log --template '{node|short}\n' --rev tip` | |
86 | $ hg log -r "$ANYNODE" --template '{rev}\n' |
|
86 | $ hg log -r "$ANYNODE" --template '{rev}\n' | |
87 | 5000 |
|
87 | 5000 | |
88 |
|
88 | |||
89 |
|
89 | |||
90 | #if rust |
|
90 | #if rust | |
91 |
|
91 | |||
92 | $ f --sha256 .hg/store/00changelog-*.nd |
|
92 | $ f --sha256 .hg/store/00changelog-*.nd | |
93 | .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob) |
|
93 | .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob) | |
94 |
|
94 | |||
95 | $ f --sha256 .hg/store/00manifest-*.nd |
|
95 | $ f --sha256 .hg/store/00manifest-*.nd | |
96 | .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob) |
|
96 | .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob) | |
97 | $ hg debugnodemap --dump-new | f --sha256 --size |
|
97 | $ hg debugnodemap --dump-new | f --sha256 --size | |
98 | size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd |
|
98 | size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd | |
99 | $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size |
|
99 | $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size | |
100 | size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd |
|
100 | size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd | |
101 | 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........| |
|
101 | 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........| | |
102 | 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."| |
|
102 | 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."| | |
103 | 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^| |
|
103 | 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^| | |
104 | 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....| |
|
104 | 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....| | |
105 | 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8| |
|
105 | 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8| | |
106 | 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i| |
|
106 | 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i| | |
107 | 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....| |
|
107 | 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....| | |
108 | 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........| |
|
108 | 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........| | |
109 | 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....| |
|
109 | 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....| | |
110 | 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................| |
|
110 | 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................| | |
111 | 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+| |
|
111 | 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+| | |
112 | 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................| |
|
112 | 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................| | |
113 | 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5| |
|
113 | 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5| | |
114 | 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U| |
|
114 | 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U| | |
115 | 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................| |
|
115 | 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................| | |
116 | 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................| |
|
116 | 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................| | |
117 |
|
117 | |||
118 |
|
118 | |||
119 | #else |
|
119 | #else | |
120 |
|
120 | |||
121 | $ f --sha256 .hg/store/00changelog-*.nd |
|
121 | $ f --sha256 .hg/store/00changelog-*.nd | |
122 | .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob) |
|
122 | .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob) | |
123 | $ hg debugnodemap --dump-new | f --sha256 --size |
|
123 | $ hg debugnodemap --dump-new | f --sha256 --size | |
124 | size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 |
|
124 | size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 | |
125 | $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size |
|
125 | $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size | |
126 | size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 |
|
126 | size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 | |
127 | 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| |
|
127 | 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| | |
128 | 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| |
|
128 | 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| | |
129 | 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................| |
|
129 | 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................| | |
130 | 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................| |
|
130 | 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................| | |
131 | 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| |
|
131 | 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| | |
132 | 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................| |
|
132 | 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................| | |
133 | 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............| |
|
133 | 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............| | |
134 | 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| |
|
134 | 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| | |
135 | 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| |
|
135 | 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| | |
136 | 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................| |
|
136 | 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................| | |
137 | 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........| |
|
137 | 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........| | |
138 | 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| |
|
138 | 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| | |
139 | 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| |
|
139 | 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| | |
140 | 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| |
|
140 | 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................| | |
141 | 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................| |
|
141 | 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................| | |
142 | 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................| |
|
142 | 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................| | |
143 |
|
143 | |||
144 | #endif |
|
144 | #endif | |
145 |
|
145 | |||
146 | $ hg debugnodemap --check |
|
146 | $ hg debugnodemap --check | |
147 | revision in index: 5001 |
|
147 | revision in index: 5001 | |
148 | revision in nodemap: 5001 |
|
148 | revision in nodemap: 5001 | |
149 |
|
149 | |||
150 | add a new commit |
|
150 | add a new commit | |
151 |
|
151 | |||
152 | $ hg up |
|
152 | $ hg up | |
153 | 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
153 | 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
154 | $ echo foo > foo |
|
154 | $ echo foo > foo | |
155 | $ hg add foo |
|
155 | $ hg add foo | |
156 |
|
156 | |||
157 |
|
157 | |||
158 | Check slow-path config value handling |
|
158 | Check slow-path config value handling | |
159 | ------------------------------------- |
|
159 | ------------------------------------- | |
160 |
|
160 | |||
161 | #if no-pure no-rust |
|
161 | #if no-pure no-rust | |
162 |
|
162 | |||
163 | $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value" |
|
163 | $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value" | |
164 | unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value" |
|
164 | unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value" | |
165 | falling back to default value: abort |
|
165 | falling back to default value: abort | |
166 | abort: accessing `persistent-nodemap` repository without associated fast implementation. |
|
166 | abort: accessing `persistent-nodemap` repository without associated fast implementation. | |
167 | (check `hg help config.format.use-persistent-nodemap` for details) |
|
167 | (check `hg help config.format.use-persistent-nodemap` for details) | |
168 | [255] |
|
168 | [255] | |
169 |
|
169 | |||
170 | $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn" |
|
170 | $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn" | |
171 | warning: accessing `persistent-nodemap` repository without associated fast implementation. |
|
171 | warning: accessing `persistent-nodemap` repository without associated fast implementation. | |
172 | (check `hg help config.format.use-persistent-nodemap` for details) |
|
172 | (check `hg help config.format.use-persistent-nodemap` for details) | |
173 | changeset: 5000:6b02b8c7b966 |
|
173 | changeset: 5000:6b02b8c7b966 | |
174 | tag: tip |
|
174 | tag: tip | |
175 | user: debugbuilddag |
|
175 | user: debugbuilddag | |
176 | date: Thu Jan 01 01:23:20 1970 +0000 |
|
176 | date: Thu Jan 01 01:23:20 1970 +0000 | |
177 | summary: r5000 |
|
177 | summary: r5000 | |
178 |
|
178 | |||
179 | $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort" |
|
179 | $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort" | |
180 | abort: accessing `persistent-nodemap` repository without associated fast implementation. |
|
180 | abort: accessing `persistent-nodemap` repository without associated fast implementation. | |
181 | (check `hg help config.format.use-persistent-nodemap` for details) |
|
181 | (check `hg help config.format.use-persistent-nodemap` for details) | |
182 | [255] |
|
182 | [255] | |
183 |
|
183 | |||
184 | #else |
|
184 | #else | |
185 |
|
185 | |||
186 | $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value" |
|
186 | $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value" | |
187 | unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value" |
|
187 | unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value" | |
188 | falling back to default value: abort |
|
188 | falling back to default value: abort | |
189 | 6b02b8c7b966+ tip |
|
189 | 6b02b8c7b966+ tip | |
190 |
|
190 | |||
191 | #endif |
|
191 | #endif | |
192 |
|
192 | |||
193 | $ hg ci -m 'foo' |
|
193 | $ hg ci -m 'foo' | |
194 |
|
194 | |||
195 | #if no-pure no-rust |
|
195 | #if no-pure no-rust | |
196 | $ hg debugnodemap --metadata |
|
196 | $ hg debugnodemap --metadata | |
197 | uid: ???????????????? (glob) |
|
197 | uid: ???????????????? (glob) | |
198 | tip-rev: 5001 |
|
198 | tip-rev: 5001 | |
199 | tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c |
|
199 | tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c | |
200 | data-length: 121088 |
|
200 | data-length: 121088 | |
201 | data-unused: 0 |
|
201 | data-unused: 0 | |
202 | data-unused: 0.000% |
|
202 | data-unused: 0.000% | |
203 | #else |
|
203 | #else | |
204 | $ hg debugnodemap --metadata |
|
204 | $ hg debugnodemap --metadata | |
205 | uid: ???????????????? (glob) |
|
205 | uid: ???????????????? (glob) | |
206 | tip-rev: 5001 |
|
206 | tip-rev: 5001 | |
207 | tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c |
|
207 | tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c | |
208 | data-length: 121344 |
|
208 | data-length: 121344 | |
209 | data-unused: 256 |
|
209 | data-unused: 256 | |
210 | data-unused: 0.211% |
|
210 | data-unused: 0.211% | |
211 | #endif |
|
211 | #endif | |
212 |
|
212 | |||
213 | $ f --size .hg/store/00changelog.n |
|
213 | $ f --size .hg/store/00changelog.n | |
214 | .hg/store/00changelog.n: size=70 |
|
214 | .hg/store/00changelog.n: size=70 | |
215 |
|
215 | |||
216 | (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)

216 | (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch) |
217 |
|
217 | |||
218 | #if pure |
|
218 | #if pure | |
219 | $ f --sha256 .hg/store/00changelog-*.nd --size |
|
219 | $ f --sha256 .hg/store/00changelog-*.nd --size | |
220 | .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob) |
|
220 | .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob) | |
221 | #endif |
|
221 | #endif | |
222 |
|
222 | |||
223 | #if rust |
|
223 | #if rust | |
224 | $ f --sha256 .hg/store/00changelog-*.nd --size |
|
224 | $ f --sha256 .hg/store/00changelog-*.nd --size | |
225 | .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob) |
|
225 | .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob) | |
226 | #endif |
|
226 | #endif | |
227 |
|
227 | |||
228 | #if no-pure no-rust |
|
228 | #if no-pure no-rust | |
229 | $ f --sha256 .hg/store/00changelog-*.nd --size |
|
229 | $ f --sha256 .hg/store/00changelog-*.nd --size | |
230 | .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob) |
|
230 | .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob) | |
231 | #endif |
|
231 | #endif | |
232 |
|
232 | |||
233 | $ hg debugnodemap --check |
|
233 | $ hg debugnodemap --check | |
234 | revision in index: 5002 |
|
234 | revision in index: 5002 | |
235 | revision in nodemap: 5002 |
|
235 | revision in nodemap: 5002 | |
236 |
|
236 | |||
237 | Test code path without mmap |
|
237 | Test code path without mmap | |
238 | --------------------------- |
|
238 | --------------------------- | |
239 |
|
239 | |||
240 | $ echo bar > bar |
|
240 | $ echo bar > bar | |
241 | $ hg add bar |
|
241 | $ hg add bar | |
242 | $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no |
|
242 | $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no | |
243 |
|
243 | |||
244 | $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes |
|
244 | $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes | |
245 | revision in index: 5003 |
|
245 | revision in index: 5003 | |
246 | revision in nodemap: 5003 |
|
246 | revision in nodemap: 5003 | |
247 | $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no |
|
247 | $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no | |
248 | revision in index: 5003 |
|
248 | revision in index: 5003 | |
249 | revision in nodemap: 5003 |
|
249 | revision in nodemap: 5003 | |
250 |
|
250 | |||
251 |
|
251 | |||
252 | #if pure |
|
252 | #if pure | |
253 | $ hg debugnodemap --metadata |
|
253 | $ hg debugnodemap --metadata | |
254 | uid: ???????????????? (glob) |
|
254 | uid: ???????????????? (glob) | |
255 | tip-rev: 5002 |
|
255 | tip-rev: 5002 | |
256 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd |
|
256 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd | |
257 | data-length: 121600 |
|
257 | data-length: 121600 | |
258 | data-unused: 512 |
|
258 | data-unused: 512 | |
259 | data-unused: 0.421% |
|
259 | data-unused: 0.421% | |
260 | $ f --sha256 .hg/store/00changelog-*.nd --size |
|
260 | $ f --sha256 .hg/store/00changelog-*.nd --size | |
261 | .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob) |
|
261 | .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob) | |
262 | #endif |
|
262 | #endif | |
263 | #if rust |
|
263 | #if rust | |
264 | $ hg debugnodemap --metadata |
|
264 | $ hg debugnodemap --metadata | |
265 | uid: ???????????????? (glob) |
|
265 | uid: ???????????????? (glob) | |
266 | tip-rev: 5002 |
|
266 | tip-rev: 5002 | |
267 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd |
|
267 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd | |
268 | data-length: 121600 |
|
268 | data-length: 121600 | |
269 | data-unused: 512 |
|
269 | data-unused: 512 | |
270 | data-unused: 0.421% |
|
270 | data-unused: 0.421% | |
271 | $ f --sha256 .hg/store/00changelog-*.nd --size |
|
271 | $ f --sha256 .hg/store/00changelog-*.nd --size | |
272 | .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob) |
|
272 | .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob) | |
273 | #endif |
|
273 | #endif | |
274 | #if no-pure no-rust |
|
274 | #if no-pure no-rust | |
275 | $ hg debugnodemap --metadata |
|
275 | $ hg debugnodemap --metadata | |
276 | uid: ???????????????? (glob) |
|
276 | uid: ???????????????? (glob) | |
277 | tip-rev: 5002 |
|
277 | tip-rev: 5002 | |
278 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd |
|
278 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd | |
279 | data-length: 121088 |
|
279 | data-length: 121088 | |
280 | data-unused: 0 |
|
280 | data-unused: 0 | |
281 | data-unused: 0.000% |
|
281 | data-unused: 0.000% | |
282 | $ f --sha256 .hg/store/00changelog-*.nd --size |
|
282 | $ f --sha256 .hg/store/00changelog-*.nd --size | |
283 | .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob) |
|
283 | .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob) | |
284 | #endif |
|
284 | #endif | |
285 |
|
285 | |||
286 | Test force warming the cache |
|
286 | Test force warming the cache | |
287 |
|
287 | |||
288 | $ rm .hg/store/00changelog.n |
|
288 | $ rm .hg/store/00changelog.n | |
289 | $ hg debugnodemap --metadata |
|
289 | $ hg debugnodemap --metadata | |
290 | $ hg debugupdatecache |
|
290 | $ hg debugupdatecache | |
291 | #if pure |
|
291 | #if pure | |
292 | $ hg debugnodemap --metadata |
|
292 | $ hg debugnodemap --metadata | |
293 | uid: ???????????????? (glob) |
|
293 | uid: ???????????????? (glob) | |
294 | tip-rev: 5002 |
|
294 | tip-rev: 5002 | |
295 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd |
|
295 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd | |
296 | data-length: 121088 |
|
296 | data-length: 121088 | |
297 | data-unused: 0 |
|
297 | data-unused: 0 | |
298 | data-unused: 0.000% |
|
298 | data-unused: 0.000% | |
299 | #else |
|
299 | #else | |
300 | $ hg debugnodemap --metadata |
|
300 | $ hg debugnodemap --metadata | |
301 | uid: ???????????????? (glob) |
|
301 | uid: ???????????????? (glob) | |
302 | tip-rev: 5002 |
|
302 | tip-rev: 5002 | |
303 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd |
|
303 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd | |
304 | data-length: 121088 |
|
304 | data-length: 121088 | |
305 | data-unused: 0 |
|
305 | data-unused: 0 | |
306 | data-unused: 0.000% |
|
306 | data-unused: 0.000% | |
307 | #endif |
|
307 | #endif | |
308 |
|
308 | |||
309 | Check out-of-sync nodemap

309 | Check out-of-sync nodemap |
310 | ========================= |
|
310 | ========================= | |
311 |
|
311 | |||
312 | First, copy the old data aside.

312 | First, copy the old data aside. |
313 |
|
313 | |||
314 | $ mkdir ../tmp-copies |
|
314 | $ mkdir ../tmp-copies | |
315 | $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies |
|
315 | $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies | |
316 |
|
316 | |||
317 | Nodemap lagging behind |
|
317 | Nodemap lagging behind | |
318 | ---------------------- |
|
318 | ---------------------- | |
319 |
|
319 | |||
320 | make a new commit |
|
320 | make a new commit | |
321 |
|
321 | |||
322 | $ echo bar2 > bar |
|
322 | $ echo bar2 > bar | |
323 | $ hg ci -m 'bar2' |
|
323 | $ hg ci -m 'bar2' | |
324 | $ NODE=`hg log -r tip -T '{node}\n'` |
|
324 | $ NODE=`hg log -r tip -T '{node}\n'` | |
325 | $ hg log -r "$NODE" -T '{rev}\n' |
|
325 | $ hg log -r "$NODE" -T '{rev}\n' | |
326 | 5003 |
|
326 | 5003 | |
327 |
|
327 | |||
328 | If the nodemap is lagging behind, it can catch up fine |
|
328 | If the nodemap is lagging behind, it can catch up fine | |
329 |
|
329 | |||
330 | $ hg debugnodemap --metadata |
|
330 | $ hg debugnodemap --metadata | |
331 | uid: ???????????????? (glob) |
|
331 | uid: ???????????????? (glob) | |
332 | tip-rev: 5003 |
|
332 | tip-rev: 5003 | |
333 | tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3 |
|
333 | tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3 | |
334 | data-length: 121344 (pure !) |
|
334 | data-length: 121344 (pure !) | |
335 | data-length: 121344 (rust !) |
|
335 | data-length: 121344 (rust !) | |
336 | data-length: 121152 (no-rust no-pure !) |
|
336 | data-length: 121152 (no-rust no-pure !) | |
337 | data-unused: 192 (pure !) |
|
337 | data-unused: 192 (pure !) | |
338 | data-unused: 192 (rust !) |
|
338 | data-unused: 192 (rust !) | |
339 | data-unused: 0 (no-rust no-pure !) |
|
339 | data-unused: 0 (no-rust no-pure !) | |
340 | data-unused: 0.158% (pure !) |
|
340 | data-unused: 0.158% (pure !) | |
341 | data-unused: 0.158% (rust !) |
|
341 | data-unused: 0.158% (rust !) | |
342 | data-unused: 0.000% (no-rust no-pure !) |
|
342 | data-unused: 0.000% (no-rust no-pure !) | |
343 | $ cp -f ../tmp-copies/* .hg/store/ |
|
343 | $ cp -f ../tmp-copies/* .hg/store/ | |
344 | $ hg debugnodemap --metadata |
|
344 | $ hg debugnodemap --metadata | |
345 | uid: ???????????????? (glob) |
|
345 | uid: ???????????????? (glob) | |
346 | tip-rev: 5002 |
|
346 | tip-rev: 5002 | |
347 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd |
|
347 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd | |
348 | data-length: 121088 |
|
348 | data-length: 121088 | |
349 | data-unused: 0 |
|
349 | data-unused: 0 | |
350 | data-unused: 0.000% |
|
350 | data-unused: 0.000% | |
351 | $ hg log -r "$NODE" -T '{rev}\n' |
|
351 | $ hg log -r "$NODE" -T '{rev}\n' | |
352 | 5003 |
|
352 | 5003 | |
353 |
|
353 | |||
354 | changelog altered |
|
354 | changelog altered | |
355 | ----------------- |
|
355 | ----------------- | |
356 |
|
356 | |||
357 | If the nodemap is not gated behind a requirement, an unaware client can alter

357 | If the nodemap is not gated behind a requirement, an unaware client can alter |
358 | the repository so the revlog used to generate the nodemap is no longer

358 | the repository so the revlog used to generate the nodemap is no longer |
359 | compatible with the persistent nodemap. We need to detect that.

359 | compatible with the persistent nodemap. We need to detect that. |
360 |
|
360 | |||
361 | $ hg up "$NODE~5" |
|
361 | $ hg up "$NODE~5" | |
362 | 0 files updated, 0 files merged, 4 files removed, 0 files unresolved |
|
362 | 0 files updated, 0 files merged, 4 files removed, 0 files unresolved | |
363 | $ echo bar > babar |
|
363 | $ echo bar > babar | |
364 | $ hg add babar |
|
364 | $ hg add babar | |
365 | $ hg ci -m 'babar' |
|
365 | $ hg ci -m 'babar' | |
366 | created new head |
|
366 | created new head | |
367 | $ OTHERNODE=`hg log -r tip -T '{node}\n'` |
|
367 | $ OTHERNODE=`hg log -r tip -T '{node}\n'` | |
368 | $ hg log -r "$OTHERNODE" -T '{rev}\n' |
|
368 | $ hg log -r "$OTHERNODE" -T '{rev}\n' | |
369 | 5004 |
|
369 | 5004 | |
370 |
|
370 | |||
371 | $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup |
|
371 | $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup | |
372 |
|
372 | |||
373 | The nodemap should detect that the changelog has been tampered with and recover.

373 | The nodemap should detect that the changelog has been tampered with and recover. |
374 |
|
374 | |||
375 | $ hg debugnodemap --metadata |
|
375 | $ hg debugnodemap --metadata | |
376 | uid: ???????????????? (glob) |
|
376 | uid: ???????????????? (glob) | |
377 | tip-rev: 5002 |
|
377 | tip-rev: 5002 | |
378 | tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944 |
|
378 | tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944 | |
379 | data-length: 121536 (pure !) |
|
379 | data-length: 121536 (pure !) | |
380 | data-length: 121088 (rust !) |
|
380 | data-length: 121088 (rust !) | |
381 | data-length: 121088 (no-pure no-rust !) |
|
381 | data-length: 121088 (no-pure no-rust !) | |
382 | data-unused: 448 (pure !) |
|
382 | data-unused: 448 (pure !) | |
383 | data-unused: 0 (rust !) |
|
383 | data-unused: 0 (rust !) | |
384 | data-unused: 0 (no-pure no-rust !) |
|
384 | data-unused: 0 (no-pure no-rust !) | |
385 | data-unused: 0.000% (rust !) |
|
385 | data-unused: 0.000% (rust !) | |
386 | data-unused: 0.369% (pure !) |
|
386 | data-unused: 0.369% (pure !) | |
387 | data-unused: 0.000% (no-pure no-rust !) |
|
387 | data-unused: 0.000% (no-pure no-rust !) | |
388 |
|
388 | |||
389 | $ cp -f ../tmp-copies/* .hg/store/ |
|
389 | $ cp -f ../tmp-copies/* .hg/store/ | |
390 | $ hg debugnodemap --metadata |
|
390 | $ hg debugnodemap --metadata | |
391 | uid: ???????????????? (glob) |
|
391 | uid: ???????????????? (glob) | |
392 | tip-rev: 5002 |
|
392 | tip-rev: 5002 | |
393 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd |
|
393 | tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd | |
394 | data-length: 121088 |
|
394 | data-length: 121088 | |
395 | data-unused: 0 |
|
395 | data-unused: 0 | |
396 | data-unused: 0.000% |
|
396 | data-unused: 0.000% | |
397 | $ hg log -r "$OTHERNODE" -T '{rev}\n' |
|
397 | $ hg log -r "$OTHERNODE" -T '{rev}\n' | |
398 | 5002 |
|
398 | 5002 | |
399 |
|
399 | |||
400 | missing data file |
|
400 | missing data file | |
401 | ----------------- |
|
401 | ----------------- | |
402 |
|
402 | |||
403 | $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \ |
|
403 | $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \ | |
404 | > sed 's/uid: //'` |
|
404 | > sed 's/uid: //'` | |
405 | $ FILE=.hg/store/00changelog-"${UUID}".nd |
|
405 | $ FILE=.hg/store/00changelog-"${UUID}".nd | |
406 | $ mv $FILE ../tmp-data-file |
|
406 | $ mv $FILE ../tmp-data-file | |
407 | $ cp .hg/store/00changelog.n ../tmp-docket |
|
407 | $ cp .hg/store/00changelog.n ../tmp-docket | |
408 |
|
408 | |||
409 | Mercurial doesn't crash

409 | Mercurial doesn't crash |
410 |
|
410 | |||
411 | $ hg log -r . |
|
411 | $ hg log -r . | |
412 | changeset: 5002:b355ef8adce0 |
|
412 | changeset: 5002:b355ef8adce0 | |
413 | tag: tip |
|
413 | tag: tip | |
414 | parent: 4998:d918ad6d18d3 |
|
414 | parent: 4998:d918ad6d18d3 | |
415 | user: test |
|
415 | user: test | |
416 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
416 | date: Thu Jan 01 00:00:00 1970 +0000 | |
417 | summary: babar |
|
417 | summary: babar | |
418 |
|
418 | |||
419 | $ hg debugnodemap --metadata |
|
419 | $ hg debugnodemap --metadata | |
420 |
|
420 | |||
421 | $ hg debugupdatecache |
|
421 | $ hg debugupdatecache | |
422 | $ hg debugnodemap --metadata |
|
422 | $ hg debugnodemap --metadata | |
423 | uid: * (glob) |
|
423 | uid: * (glob) | |
424 | tip-rev: 5002 |
|
424 | tip-rev: 5002 | |
425 | tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944 |
|
425 | tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944 | |
426 | data-length: 121088 |
|
426 | data-length: 121088 | |
427 | data-unused: 0 |
|
427 | data-unused: 0 | |
428 | data-unused: 0.000% |
|
428 | data-unused: 0.000% | |
429 | $ mv ../tmp-data-file $FILE |
|
429 | $ mv ../tmp-data-file $FILE | |
430 | $ mv ../tmp-docket .hg/store/00changelog.n |
|
430 | $ mv ../tmp-docket .hg/store/00changelog.n | |
431 |
|
431 | |||
432 | Check transaction-related property

432 | Check transaction-related property |
433 | ================================== |
|
433 | ================================== | |
434 |
|
434 | |||
435 | An up-to-date nodemap should be available to shell hooks.

435 | An up-to-date nodemap should be available to shell hooks. |
436 |
|
436 | |||
437 | $ echo dsljfl > a |
|
437 | $ echo dsljfl > a | |
438 | $ hg add a |
|
438 | $ hg add a | |
439 | $ hg ci -m a |
|
439 | $ hg ci -m a | |
440 | $ hg debugnodemap --metadata |
|
440 | $ hg debugnodemap --metadata | |
441 | uid: ???????????????? (glob) |
|
441 | uid: ???????????????? (glob) | |
442 | tip-rev: 5003 |
|
442 | tip-rev: 5003 | |
443 | tip-node: a52c5079765b5865d97b993b303a18740113bbb2 |
|
443 | tip-node: a52c5079765b5865d97b993b303a18740113bbb2 | |
444 | data-length: 121088 |
|
444 | data-length: 121088 | |
445 | data-unused: 0 |
|
445 | data-unused: 0 | |
446 | data-unused: 0.000% |
|
446 | data-unused: 0.000% | |
447 | $ echo babar2 > babar |
|
447 | $ echo babar2 > babar | |
448 | $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata" |
|
448 | $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata" | |
449 | uid: ???????????????? (glob) |
|
449 | uid: ???????????????? (glob) | |
450 | tip-rev: 5004 |
|
450 | tip-rev: 5004 | |
451 | tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984 |
|
451 | tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984 | |
452 | data-length: 121280 (pure !) |
|
452 | data-length: 121280 (pure !) | |
453 | data-length: 121280 (rust !) |
|
453 | data-length: 121280 (rust !) | |
454 | data-length: 121088 (no-pure no-rust !) |
|
454 | data-length: 121088 (no-pure no-rust !) | |
455 | data-unused: 192 (pure !) |
|
455 | data-unused: 192 (pure !) | |
456 | data-unused: 192 (rust !) |
|
456 | data-unused: 192 (rust !) | |
457 | data-unused: 0 (no-pure no-rust !) |
|
457 | data-unused: 0 (no-pure no-rust !) | |
458 | data-unused: 0.158% (pure !) |
|
458 | data-unused: 0.158% (pure !) | |
459 | data-unused: 0.158% (rust !) |
|
459 | data-unused: 0.158% (rust !) | |
460 | data-unused: 0.000% (no-pure no-rust !) |
|
460 | data-unused: 0.000% (no-pure no-rust !) | |
461 | $ hg debugnodemap --metadata |
|
461 | $ hg debugnodemap --metadata | |
462 | uid: ???????????????? (glob) |
|
462 | uid: ???????????????? (glob) | |
463 | tip-rev: 5004 |
|
463 | tip-rev: 5004 | |
464 | tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984 |
|
464 | tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984 | |
465 | data-length: 121280 (pure !) |
|
465 | data-length: 121280 (pure !) | |
466 | data-length: 121280 (rust !) |
|
466 | data-length: 121280 (rust !) | |
467 | data-length: 121088 (no-pure no-rust !) |
|
467 | data-length: 121088 (no-pure no-rust !) | |
468 | data-unused: 192 (pure !) |
|
468 | data-unused: 192 (pure !) | |
469 | data-unused: 192 (rust !) |
|
469 | data-unused: 192 (rust !) | |
470 | data-unused: 0 (no-pure no-rust !) |
|
470 | data-unused: 0 (no-pure no-rust !) | |
471 | data-unused: 0.158% (pure !) |
|
471 | data-unused: 0.158% (pure !) | |
472 | data-unused: 0.158% (rust !) |
|
472 | data-unused: 0.158% (rust !) | |
473 | data-unused: 0.000% (no-pure no-rust !) |
|
473 | data-unused: 0.000% (no-pure no-rust !) | |
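The hook above is wired in with --config for a single commit. The same check could be installed permanently in the repository configuration; a minimal sketch of the equivalent hgrc stanza, reusing the hook name and command from the test (illustrative only, not part of the recorded output):

  [hooks]
  pretxnclose.nodemap-test = hg debugnodemap --metadata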
474 |
|
474 | |||
475 | Another process does not see the pending nodemap content while the transaction is running. |
|
475 | Another process does not see the pending nodemap content while the transaction is running. | |
476 |
|
476 | |||
477 | $ PATH=$RUNTESTDIR/testlib/:$PATH |
|
477 | $ PATH=$RUNTESTDIR/testlib/:$PATH | |
478 | $ echo qpoasp > a |
|
478 | $ echo qpoasp > a | |
479 | $ hg ci -m a2 \ |
|
479 | $ hg ci -m a2 \ | |
480 | > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \ |
|
480 | > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \ | |
481 | > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 & |
|
481 | > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 & | |
482 |
|
482 | |||
483 | (read the repository while the commit transaction is pending) |
|
483 | (read the repository while the commit transaction is pending) | |
484 |
|
484 | |||
485 | $ wait-on-file 20 sync-txn-pending && \ |
|
485 | $ wait-on-file 20 sync-txn-pending && \ | |
486 | > hg debugnodemap --metadata && \ |
|
486 | > hg debugnodemap --metadata && \ | |
487 | > wait-on-file 20 sync-txn-close sync-repo-read |
|
487 | > wait-on-file 20 sync-txn-close sync-repo-read | |
488 | uid: ???????????????? (glob) |
|
488 | uid: ???????????????? (glob) | |
489 | tip-rev: 5004 |
|
489 | tip-rev: 5004 | |
490 | tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984 |
|
490 | tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984 | |
491 | data-length: 121280 (pure !) |
|
491 | data-length: 121280 (pure !) | |
492 | data-length: 121280 (rust !) |
|
492 | data-length: 121280 (rust !) | |
493 | data-length: 121088 (no-pure no-rust !) |
|
493 | data-length: 121088 (no-pure no-rust !) | |
494 | data-unused: 192 (pure !) |
|
494 | data-unused: 192 (pure !) | |
495 | data-unused: 192 (rust !) |
|
495 | data-unused: 192 (rust !) | |
496 | data-unused: 0 (no-pure no-rust !) |
|
496 | data-unused: 0 (no-pure no-rust !) | |
497 | data-unused: 0.158% (pure !) |
|
497 | data-unused: 0.158% (pure !) | |
498 | data-unused: 0.158% (rust !) |
|
498 | data-unused: 0.158% (rust !) | |
499 | data-unused: 0.000% (no-pure no-rust !) |
|
499 | data-unused: 0.000% (no-pure no-rust !) | |
500 | $ hg debugnodemap --metadata |
|
500 | $ hg debugnodemap --metadata | |
501 | uid: ???????????????? (glob) |
|
501 | uid: ???????????????? (glob) | |
502 | tip-rev: 5005 |
|
502 | tip-rev: 5005 | |
503 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe |
|
503 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe | |
504 | data-length: 121536 (pure !) |
|
504 | data-length: 121536 (pure !) | |
505 | data-length: 121536 (rust !) |
|
505 | data-length: 121536 (rust !) | |
506 | data-length: 121088 (no-pure no-rust !) |
|
506 | data-length: 121088 (no-pure no-rust !) | |
507 | data-unused: 448 (pure !) |
|
507 | data-unused: 448 (pure !) | |
508 | data-unused: 448 (rust !) |
|
508 | data-unused: 448 (rust !) | |
509 | data-unused: 0 (no-pure no-rust !) |
|
509 | data-unused: 0 (no-pure no-rust !) | |
510 | data-unused: 0.369% (pure !) |
|
510 | data-unused: 0.369% (pure !) | |
511 | data-unused: 0.369% (rust !) |
|
511 | data-unused: 0.369% (rust !) | |
512 | data-unused: 0.000% (no-pure no-rust !) |
|
512 | data-unused: 0.000% (no-pure no-rust !) | |
513 |
|
513 | |||
514 | $ cat output.txt |
|
514 | $ cat output.txt | |
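For context: wait-on-file, picked up from $RUNTESTDIR/testlib/ above, takes a timeout in seconds, a file to wait for, and an optional file to create before waiting; that is how the two processes above hand control back and forth. A minimal sketch of one side of the handshake, with hypothetical marker names (illustrative only, not part of the test):

  $ wait-on-file 20 peer-ready ready-here   # create ready-here, then wait up to 20 seconds for peer-ready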
515 |
|
515 | |||
516 | Check that a failing transaction will properly revert the data |
|
516 | Check that a failing transaction will properly revert the data | |
517 |
|
517 | |||
518 | $ echo plakfe > a |
|
518 | $ echo plakfe > a | |
519 | $ f --size --sha256 .hg/store/00changelog-*.nd |
|
519 | $ f --size --sha256 .hg/store/00changelog-*.nd | |
520 | .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !) |
|
520 | .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !) | |
521 | .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !) |
|
521 | .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !) | |
522 | .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !) |
|
522 | .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !) | |
523 | $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py" |
|
523 | $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py" | |
524 | transaction abort! |
|
524 | transaction abort! | |
525 | rollback completed |
|
525 | rollback completed | |
526 | abort: This is a late abort |
|
526 | abort: This is a late abort | |
527 | [255] |
|
527 | [255] | |
528 | $ hg debugnodemap --metadata |
|
528 | $ hg debugnodemap --metadata | |
529 | uid: ???????????????? (glob) |
|
529 | uid: ???????????????? (glob) | |
530 | tip-rev: 5005 |
|
530 | tip-rev: 5005 | |
531 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe |
|
531 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe | |
532 | data-length: 121536 (pure !) |
|
532 | data-length: 121536 (pure !) | |
533 | data-length: 121536 (rust !) |
|
533 | data-length: 121536 (rust !) | |
534 | data-length: 121088 (no-pure no-rust !) |
|
534 | data-length: 121088 (no-pure no-rust !) | |
535 | data-unused: 448 (pure !) |
|
535 | data-unused: 448 (pure !) | |
536 | data-unused: 448 (rust !) |
|
536 | data-unused: 448 (rust !) | |
537 | data-unused: 0 (no-pure no-rust !) |
|
537 | data-unused: 0 (no-pure no-rust !) | |
538 | data-unused: 0.369% (pure !) |
|
538 | data-unused: 0.369% (pure !) | |
539 | data-unused: 0.369% (rust !) |
|
539 | data-unused: 0.369% (rust !) | |
540 | data-unused: 0.000% (no-pure no-rust !) |
|
540 | data-unused: 0.000% (no-pure no-rust !) | |
541 | $ f --size --sha256 .hg/store/00changelog-*.nd |
|
541 | $ f --size --sha256 .hg/store/00changelog-*.nd | |
542 | .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !) |
|
542 | .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !) | |
543 | .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !) |
|
543 | .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !) | |
544 | .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !) |
|
544 | .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !) | |
545 |
|
545 | |||
546 | Check that removing content does not confuse the nodemap |
|
546 | Check that removing content does not confuse the nodemap | |
547 | -------------------------------------------------------- |
|
547 | -------------------------------------------------------- | |
548 |
|
548 | |||
549 | removing data with rollback |
|
549 | removing data with rollback | |
550 |
|
550 | |||
551 | $ echo aso > a |
|
551 | $ echo aso > a | |
552 | $ hg ci -m a4 |
|
552 | $ hg ci -m a4 | |
553 | $ hg rollback |
|
553 | $ hg rollback | |
554 | repository tip rolled back to revision 5005 (undo commit) |
|
554 | repository tip rolled back to revision 5005 (undo commit) | |
555 | working directory now based on revision 5005 |
|
555 | working directory now based on revision 5005 | |
556 | $ hg id -r . |
|
556 | $ hg id -r . | |
557 | 90d5d3ba2fc4 tip |
|
557 | 90d5d3ba2fc4 tip | |
558 |
|
558 | |||
559 | removing data with strip |
|
559 | removing data with strip | |
560 |
|
560 | |||
561 | $ echo aso > a |
|
561 | $ echo aso > a | |
562 | $ hg ci -m a4 |
|
562 | $ hg ci -m a4 | |
563 | $ hg --config extensions.strip= strip -r . --no-backup |
|
563 | $ hg --config extensions.strip= strip -r . --no-backup | |
564 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
564 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
565 | $ hg id -r . --traceback |
|
565 | $ hg id -r . --traceback | |
566 | 90d5d3ba2fc4 tip |
|
566 | 90d5d3ba2fc4 tip | |
567 |
|
567 | |||
568 | Test upgrade / downgrade |
|
568 | Test upgrade / downgrade | |
569 | ======================== |
|
569 | ======================== | |
570 |
|
570 | |||
571 | downgrading |
|
571 | downgrading | |
572 |
|
572 | |||
573 | $ cat << EOF >> .hg/hgrc |
|
573 | $ cat << EOF >> .hg/hgrc | |
574 | > [format] |
|
574 | > [format] | |
575 | > use-persistent-nodemap=no |
|
575 | > use-persistent-nodemap=no | |
576 | > EOF |
|
576 | > EOF | |
577 | $ hg debugformat -v |
|
577 | $ hg debugformat -v | |
578 | format-variant repo config default |
|
578 | format-variant repo config default | |
579 | fncache: yes yes yes |
|
579 | fncache: yes yes yes | |
580 | dotencode: yes yes yes |
|
580 | dotencode: yes yes yes | |
581 | generaldelta: yes yes yes |
|
581 | generaldelta: yes yes yes | |
582 | share-safe: no no no |
|
582 | share-safe: no no no | |
583 | sparserevlog: yes yes yes |
|
583 | sparserevlog: yes yes yes | |
584 | persistent-nodemap: yes no no |
|
584 | persistent-nodemap: yes no no | |
585 | copies-sdc: no no no |
|
585 | copies-sdc: no no no | |
586 | revlog-v2: no no no |
|
586 | revlog-v2: no no no | |
587 | plain-cl-delta: yes yes yes |
|
587 | plain-cl-delta: yes yes yes | |
588 | compression: zlib zlib zlib (no-zstd !) |
|
588 | compression: zlib zlib zlib (no-zstd !) | |
589 | compression: zstd zstd zstd (zstd !) |
|
589 | compression: zstd zstd zstd (zstd !) | |
590 | compression-level: default default default |
|
590 | compression-level: default default default | |
591 | $ hg debugupgraderepo --run --no-backup |
|
591 | $ hg debugupgraderepo --run --no-backup | |
592 | upgrade will perform the following actions: |
|
592 | upgrade will perform the following actions: | |
593 |
|
593 | |||
594 | requirements |
|
594 | requirements | |
595 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !) |
|
595 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !) | |
596 | preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !) |
|
596 | preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !) | |
597 | removed: persistent-nodemap |
|
597 | removed: persistent-nodemap | |
598 |
|
598 | |||
599 | processed revlogs: |
|
599 | processed revlogs: | |
600 | - all-filelogs |
|
600 | - all-filelogs | |
601 | - changelog |
|
601 | - changelog | |
602 | - manifest |
|
602 | - manifest | |
603 |
|
603 | |||
604 | beginning upgrade... |
|
604 | beginning upgrade... | |
605 | repository locked and read-only |
|
605 | repository locked and read-only | |
606 | creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob) |
|
606 | creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob) | |
607 | (it is safe to interrupt this process any time before data migration completes) |
|
607 | (it is safe to interrupt this process any time before data migration completes) | |
608 | downgrading repository to not use persistent nodemap feature |
|
608 | downgrading repository to not use persistent nodemap feature | |
609 | removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob) |
|
609 | removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob) | |
610 | $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' |
|
610 | $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' | |
611 | 00changelog-*.nd (glob) |
|
611 | 00changelog-*.nd (glob) | |
612 | 00manifest-*.nd (glob) |
|
612 | 00manifest-*.nd (glob) | |
613 | undo.backup.00changelog.n |
|
613 | undo.backup.00changelog.n | |
614 | undo.backup.00manifest.n |
|
614 | undo.backup.00manifest.n | |
615 | $ hg debugnodemap --metadata |
|
615 | $ hg debugnodemap --metadata | |
616 |
|
616 | |||
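With the persistent nodemap removed, hg debugnodemap --metadata prints nothing because no docket file is left in .hg/store. A sketch of how a script could assert that state (illustrative only, not part of the test):

  $ test -z "$(hg debugnodemap --metadata)" && echo no-persistent-nodemap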
617 |
|
617 | |||
618 | upgrading |
|
618 | upgrading | |
619 |
|
619 | |||
620 | $ cat << EOF >> .hg/hgrc |
|
620 | $ cat << EOF >> .hg/hgrc | |
621 | > [format] |
|
621 | > [format] | |
622 | > use-persistent-nodemap=yes |
|
622 | > use-persistent-nodemap=yes | |
623 | > EOF |
|
623 | > EOF | |
624 | $ hg debugformat -v |
|
624 | $ hg debugformat -v | |
625 | format-variant repo config default |
|
625 | format-variant repo config default | |
626 | fncache: yes yes yes |
|
626 | fncache: yes yes yes | |
627 | dotencode: yes yes yes |
|
627 | dotencode: yes yes yes | |
628 | generaldelta: yes yes yes |
|
628 | generaldelta: yes yes yes | |
629 | share-safe: no no no |
|
629 | share-safe: no no no | |
630 | sparserevlog: yes yes yes |
|
630 | sparserevlog: yes yes yes | |
631 | persistent-nodemap: no yes no |
|
631 | persistent-nodemap: no yes no | |
632 | copies-sdc: no no no |
|
632 | copies-sdc: no no no | |
633 | revlog-v2: no no no |
|
633 | revlog-v2: no no no | |
634 | plain-cl-delta: yes yes yes |
|
634 | plain-cl-delta: yes yes yes | |
635 | compression: zlib zlib zlib (no-zstd !) |
|
635 | compression: zlib zlib zlib (no-zstd !) | |
636 | compression: zstd zstd zstd (zstd !) |
|
636 | compression: zstd zstd zstd (zstd !) | |
637 | compression-level: default default default |
|
637 | compression-level: default default default | |
638 | $ hg debugupgraderepo --run --no-backup |
|
638 | $ hg debugupgraderepo --run --no-backup | |
639 | upgrade will perform the following actions: |
|
639 | upgrade will perform the following actions: | |
640 |
|
640 | |||
641 | requirements |
|
641 | requirements | |
642 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !) |
|
642 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !) | |
643 | preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !) |
|
643 | preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !) | |
644 | added: persistent-nodemap |
|
644 | added: persistent-nodemap | |
645 |
|
645 | |||
646 | persistent-nodemap |
|
646 | persistent-nodemap | |
647 | Speedup revision lookup by node id. |
|
647 | Speedup revision lookup by node id. | |
648 |
|
648 | |||
649 | processed revlogs: |
|
649 | processed revlogs: | |
650 | - all-filelogs |
|
650 | - all-filelogs | |
651 | - changelog |
|
651 | - changelog | |
652 | - manifest |
|
652 | - manifest | |
653 |
|
653 | |||
654 | beginning upgrade... |
|
654 | beginning upgrade... | |
655 | repository locked and read-only |
|
655 | repository locked and read-only | |
656 | creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob) |
|
656 | creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob) | |
657 | (it is safe to interrupt this process any time before data migration completes) |
|
657 | (it is safe to interrupt this process any time before data migration completes) | |
658 | upgrading repository to use persistent nodemap feature |
|
658 | upgrading repository to use persistent nodemap feature | |
659 | removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob) |
|
659 | removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob) | |
660 | $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' |
|
660 | $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' | |
661 | 00changelog-*.nd (glob) |
|
661 | 00changelog-*.nd (glob) | |
662 | 00changelog.n |
|
662 | 00changelog.n | |
663 | 00manifest-*.nd (glob) |
|
663 | 00manifest-*.nd (glob) | |
664 | 00manifest.n |
|
664 | 00manifest.n | |
665 | undo.backup.00changelog.n |
|
665 | undo.backup.00changelog.n | |
666 | undo.backup.00manifest.n |
|
666 | undo.backup.00manifest.n | |
667 |
|
667 | |||
668 | $ hg debugnodemap --metadata |
|
668 | $ hg debugnodemap --metadata | |
669 | uid: * (glob) |
|
669 | uid: * (glob) | |
670 | tip-rev: 5005 |
|
670 | tip-rev: 5005 | |
671 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe |
|
671 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe | |
672 | data-length: 121088 |
|
672 | data-length: 121088 | |
673 | data-unused: 0 |
|
673 | data-unused: 0 | |
674 | data-unused: 0.000% |
|
674 | data-unused: 0.000% | |
675 |
|
675 | |||
676 | Running unrelated upgrade |
|
676 | Running unrelated upgrade | |
677 |
|
677 | |||
678 | $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all |
|
678 | $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all | |
679 | upgrade will perform the following actions: |
|
679 | upgrade will perform the following actions: | |
680 |
|
680 | |||
681 | requirements |
|
681 | requirements | |
682 | preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (no-zstd !) |
|
682 | preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (no-zstd !) | |
683 | preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !) |
|
683 | preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !) | |
684 |
|
684 | |||
685 | optimisations: re-delta-all |
|
685 | optimisations: re-delta-all | |
686 |
|
686 | |||
687 | processed revlogs: |
|
687 | processed revlogs: | |
688 | - all-filelogs |
|
688 | - all-filelogs | |
689 | - changelog |
|
689 | - changelog | |
690 | - manifest |
|
690 | - manifest | |
691 |
|
691 | |||
692 | $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' |
|
692 | $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' | |
693 | 00changelog-*.nd (glob) |
|
693 | 00changelog-*.nd (glob) | |
694 | 00changelog.n |
|
694 | 00changelog.n | |
695 | 00manifest-*.nd (glob) |
|
695 | 00manifest-*.nd (glob) | |
696 | 00manifest.n |
|
696 | 00manifest.n | |
697 |
|
697 | |||
698 | $ hg debugnodemap --metadata |
|
698 | $ hg debugnodemap --metadata | |
699 | uid: * (glob) |
|
699 | uid: * (glob) | |
700 | tip-rev: 5005 |
|
700 | tip-rev: 5005 | |
701 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe |
|
701 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe | |
702 | data-length: 121088 |
|
702 | data-length: 121088 | |
703 | data-unused: 0 |
|
703 | data-unused: 0 | |
704 | data-unused: 0.000% |
|
704 | data-unused: 0.000% | |
705 |
|
705 | |||
706 | Persistent nodemap and local/streaming clone |
|
706 | Persistent nodemap and local/streaming clone | |
707 | ============================================ |
|
707 | ============================================ | |
708 |
|
708 | |||
709 | $ cd .. |
|
709 | $ cd .. | |
710 |
|
710 | |||
711 | standard clone |
|
711 | standard clone | |
712 | -------------- |
|
712 | -------------- | |
713 |
|
713 | |||
714 | The persistent nodemap should exist after a standard clone |
|
714 | The persistent nodemap should exist after a standard clone | |
715 |
|
715 | |||
716 | $ hg clone --pull --quiet -U test-repo standard-clone |
|
716 | $ hg clone --pull --quiet -U test-repo standard-clone | |
717 | $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' |
|
717 | $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' | |
718 | 00changelog-*.nd (glob) |
|
718 | 00changelog-*.nd (glob) | |
719 | 00changelog.n |
|
719 | 00changelog.n | |
720 | 00manifest-*.nd (glob) |
|
720 | 00manifest-*.nd (glob) | |
721 | 00manifest.n |
|
721 | 00manifest.n | |
722 | $ hg -R standard-clone debugnodemap --metadata |
|
722 | $ hg -R standard-clone debugnodemap --metadata | |
723 | uid: * (glob) |
|
723 | uid: * (glob) | |
724 | tip-rev: 5005 |
|
724 | tip-rev: 5005 | |
725 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe |
|
725 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe | |
726 | data-length: 121088 |
|
726 | data-length: 121088 | |
727 | data-unused: 0 |
|
727 | data-unused: 0 | |
728 | data-unused: 0.000% |
|
728 | data-unused: 0.000% | |
729 |
|
729 | |||
730 |
|
730 | |||
731 | local clone |
|
731 | local clone | |
732 | ------------ |
|
732 | ------------ | |
733 |
|
733 | |||
734 | The persistent nodemap should exist after a local clone |
|
734 | The persistent nodemap should exist after a local clone | |
735 |
|
735 | |||
736 | $ hg clone -U test-repo local-clone |
|
736 | $ hg clone -U test-repo local-clone | |
737 | $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' |
|
737 | $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' | |
738 | 00changelog-*.nd (glob) |
|
738 | 00changelog-*.nd (glob) | |
739 | 00changelog.n |
|
739 | 00changelog.n | |
740 | 00manifest-*.nd (glob) |
|
740 | 00manifest-*.nd (glob) | |
741 | 00manifest.n |
|
741 | 00manifest.n | |
742 | $ hg -R local-clone debugnodemap --metadata |
|
742 | $ hg -R local-clone debugnodemap --metadata | |
743 | uid: * (glob) |
|
743 | uid: * (glob) | |
744 | tip-rev: 5005 |
|
744 | tip-rev: 5005 | |
745 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe |
|
745 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe | |
746 | data-length: 121088 |
|
746 | data-length: 121088 | |
747 | data-unused: 0 |
|
747 | data-unused: 0 | |
748 | data-unused: 0.000% |
|
748 | data-unused: 0.000% | |
749 |
|
749 | |||
750 | stream clone |
|
750 | stream clone | |
751 | ------------ |
|
751 | ------------ | |
752 |
|
752 | |||
753 | The persistent nodemap should exist after a streaming clone |
|
753 | The persistent nodemap should exist after a streaming clone | |
754 |
|
754 | |||
755 | $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)' |
|
755 | $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)' | |
756 | adding [s] 00manifest.n (70 bytes) |
|
756 | adding [s] 00manifest.n (70 bytes) | |
757 | adding [s] 00manifest.i (313 KB) |
|
|||
758 | adding [s] 00manifest.d (452 KB) (no-zstd !) |
|
757 | adding [s] 00manifest.d (452 KB) (no-zstd !) | |
759 | adding [s] 00manifest.d (491 KB) (zstd !) |
|
758 | adding [s] 00manifest.d (491 KB) (zstd !) | |
760 | adding [s] 00manifest-*.nd (118 KB) (glob) |
|
759 | adding [s] 00manifest-*.nd (118 KB) (glob) | |
761 | adding [s] 00changelog.n (70 bytes) |
|
760 | adding [s] 00changelog.n (70 bytes) | |
762 | adding [s] 00changelog.i (313 KB) |
|
|||
763 | adding [s] 00changelog.d (360 KB) (no-zstd !) |
|
761 | adding [s] 00changelog.d (360 KB) (no-zstd !) | |
764 | adding [s] 00changelog.d (368 KB) (zstd !) |
|
762 | adding [s] 00changelog.d (368 KB) (zstd !) | |
765 | adding [s] 00changelog-*.nd (118 KB) (glob) |
|
763 | adding [s] 00changelog-*.nd (118 KB) (glob) | |
|
764 | adding [s] 00manifest.i (313 KB) | |||
|
765 | adding [s] 00changelog.i (313 KB) | |||
766 | $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' |
|
766 | $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)' | |
767 | 00changelog-*.nd (glob) |
|
767 | 00changelog-*.nd (glob) | |
768 | 00changelog.n |
|
768 | 00changelog.n | |
769 | 00manifest-*.nd (glob) |
|
769 | 00manifest-*.nd (glob) | |
770 | 00manifest.n |
|
770 | 00manifest.n | |
771 | $ hg -R stream-clone debugnodemap --metadata |
|
771 | $ hg -R stream-clone debugnodemap --metadata | |
772 | uid: * (glob) |
|
772 | uid: * (glob) | |
773 | tip-rev: 5005 |
|
773 | tip-rev: 5005 | |
774 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe |
|
774 | tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe | |
775 | data-length: 121088 |
|
775 | data-length: 121088 | |
776 | data-unused: 0 |
|
776 | data-unused: 0 | |
777 | data-unused: 0.000% |
|
777 | data-unused: 0.000% |