@@ -1,305 +1,298 @@
 # shallowrepo.py - shallow repository that uses remote filelogs
 #
 # Copyright 2013 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 
 import os
 
 from mercurial.i18n import _
 from mercurial.node import hex, nullid, nullrev
 from mercurial import (
     encoding,
     error,
     localrepo,
     match,
     scmutil,
     sparse,
     util,
 )
 from mercurial.utils import procutil
 from . import (
     connectionpool,
     constants,
     contentstore,
     datapack,
     fileserverclient,
     historypack,
     metadatastore,
     remotefilectx,
     remotefilelog,
     shallowutil,
 )
 
-if util.safehasattr(util, '_hgexecutable'):
-    # Before 5be286db
-    _hgexecutable = util.hgexecutable
-else:
-    from mercurial.utils import procutil
-    _hgexecutable = procutil.hgexecutable
-
 # These make*stores functions are global so that other extensions can replace
 # them.
 def makelocalstores(repo):
     """In-repo stores, like .hg/store/data; can not be discarded."""
     localpath = os.path.join(repo.svfs.vfs.base, 'data')
     if not os.path.exists(localpath):
         os.makedirs(localpath)
 
     # Instantiate local data stores
     localcontent = contentstore.remotefilelogcontentstore(
         repo, localpath, repo.name, shared=False)
     localmetadata = metadatastore.remotefilelogmetadatastore(
         repo, localpath, repo.name, shared=False)
     return localcontent, localmetadata
 
 def makecachestores(repo):
     """Typically machine-wide, cache of remote data; can be discarded."""
     # Instantiate shared cache stores
     cachepath = shallowutil.getcachepath(repo.ui)
     cachecontent = contentstore.remotefilelogcontentstore(
         repo, cachepath, repo.name, shared=True)
     cachemetadata = metadatastore.remotefilelogmetadatastore(
         repo, cachepath, repo.name, shared=True)
 
     repo.sharedstore = cachecontent
     repo.shareddatastores.append(cachecontent)
     repo.sharedhistorystores.append(cachemetadata)
 
     return cachecontent, cachemetadata
 
 def makeremotestores(repo, cachecontent, cachemetadata):
     """These stores fetch data from a remote server."""
     # Instantiate remote stores
     repo.fileservice = fileserverclient.fileserverclient(repo)
     remotecontent = contentstore.remotecontentstore(
         repo.ui, repo.fileservice, cachecontent)
     remotemetadata = metadatastore.remotemetadatastore(
         repo.ui, repo.fileservice, cachemetadata)
     return remotecontent, remotemetadata
 
 def makepackstores(repo):
     """Packs are more efficient (to read from) cache stores."""
     # Instantiate pack stores
     packpath = shallowutil.getcachepackpath(repo,
                                             constants.FILEPACK_CATEGORY)
     packcontentstore = datapack.datapackstore(repo.ui, packpath)
     packmetadatastore = historypack.historypackstore(repo.ui, packpath)
 
     repo.shareddatastores.append(packcontentstore)
     repo.sharedhistorystores.append(packmetadatastore)
     shallowutil.reportpackmetrics(repo.ui, 'filestore', packcontentstore,
                                   packmetadatastore)
     return packcontentstore, packmetadatastore
 
 def makeunionstores(repo):
     """Union stores iterate the other stores and return the first result."""
     repo.shareddatastores = []
     repo.sharedhistorystores = []
 
     packcontentstore, packmetadatastore = makepackstores(repo)
     cachecontent, cachemetadata = makecachestores(repo)
     localcontent, localmetadata = makelocalstores(repo)
     remotecontent, remotemetadata = makeremotestores(repo, cachecontent,
                                                      cachemetadata)
 
     # Instantiate union stores
     repo.contentstore = contentstore.unioncontentstore(
         packcontentstore, cachecontent,
         localcontent, remotecontent, writestore=localcontent)
     repo.metadatastore = metadatastore.unionmetadatastore(
         packmetadatastore, cachemetadata, localmetadata, remotemetadata,
         writestore=localmetadata)
 
     fileservicedatawrite = cachecontent
     fileservicehistorywrite = cachemetadata
     repo.fileservice.setstore(repo.contentstore, repo.metadatastore,
                               fileservicedatawrite, fileservicehistorywrite)
     shallowutil.reportpackmetrics(repo.ui, 'filestore',
                                   packcontentstore, packmetadatastore)
 
 def wraprepo(repo):
     class shallowrepository(repo.__class__):
         @util.propertycache
         def name(self):
             return self.ui.config('remotefilelog', 'reponame')
 
         @util.propertycache
         def fallbackpath(self):
             path = repo.ui.config("remotefilelog", "fallbackpath",
                                   repo.ui.config('paths', 'default'))
             if not path:
                 raise error.Abort("no remotefilelog server "
                                   "configured - is your .hg/hgrc trusted?")
 
             return path
 
         def maybesparsematch(self, *revs, **kwargs):
             '''
             A wrapper that allows the remotefilelog to invoke sparsematch() if
             this is a sparse repository, or returns None if this is not a
             sparse repository.
             '''
             if revs:
                 ret = sparse.matcher(repo, revs=revs)
             else:
                 ret = sparse.matcher(repo)
 
             if ret.always():
                 return None
             return ret
 
         def file(self, f):
             if f[0] == '/':
                 f = f[1:]
 
             if self.shallowmatch(f):
                 return remotefilelog.remotefilelog(self.svfs, f, self)
             else:
                 return super(shallowrepository, self).file(f)
 
         def filectx(self, path, *args, **kwargs):
             if self.shallowmatch(path):
                 return remotefilectx.remotefilectx(self, path, *args, **kwargs)
             else:
                 return super(shallowrepository, self).filectx(path, *args,
                                                               **kwargs)
 
         @localrepo.unfilteredmethod
         def commitctx(self, ctx, error=False):
             """Add a new revision to current repository.
             Revision information is passed via the context argument.
             """
 
             # some contexts already have manifest nodes, they don't need any
             # prefetching (for example if we're just editing a commit message
             # we can reuse manifest
             if not ctx.manifestnode():
                 # prefetch files that will likely be compared
                 m1 = ctx.p1().manifest()
                 files = []
                 for f in ctx.modified() + ctx.added():
                     fparent1 = m1.get(f, nullid)
                     if fparent1 != nullid:
                         files.append((f, hex(fparent1)))
                 self.fileservice.prefetch(files)
             return super(shallowrepository, self).commitctx(ctx,
                                                             error=error)
 
         def backgroundprefetch(self, revs, base=None, repack=False, pats=None,
                                opts=None):
             """Runs prefetch in background with optional repack
             """
-            cmd = [_hgexecutable(), '-R', repo.origroot, 'prefetch']
+            cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'prefetch']
             if repack:
                 cmd.append('--repack')
             if revs:
                 cmd += ['-r', revs]
             procutil.runbgcommand(cmd, encoding.environ)
 
         def prefetch(self, revs, base=None, pats=None, opts=None):
             """Prefetches all the necessary file revisions for the given revs
             Optionally runs repack in background
             """
             with repo._lock(repo.svfs, 'prefetchlock', True, None, None,
                             _('prefetching in %s') % repo.origroot):
                 self._prefetch(revs, base, pats, opts)
 
         def _prefetch(self, revs, base=None, pats=None, opts=None):
             fallbackpath = self.fallbackpath
             if fallbackpath:
                 # If we know a rev is on the server, we should fetch the server
                 # version of those files, since our local file versions might
                 # become obsolete if the local commits are stripped.
                 localrevs = repo.revs('outgoing(%s)', fallbackpath)
                 if base is not None and base != nullrev:
                     serverbase = list(repo.revs('first(reverse(::%s) - %ld)',
                                                 base, localrevs))
                     if serverbase:
                         base = serverbase[0]
             else:
                 localrevs = repo
 
             mfl = repo.manifestlog
             mfrevlog = mfl.getstorage('')
             if base is not None:
                 mfdict = mfl[repo[base].manifestnode()].read()
                 skip = set(mfdict.iteritems())
             else:
                 skip = set()
 
             # Copy the skip set to start large and avoid constant resizing,
             # and since it's likely to be very similar to the prefetch set.
             files = skip.copy()
             serverfiles = skip.copy()
             visited = set()
             visited.add(nullrev)
             revcount = len(revs)
             progress = self.ui.makeprogress(_('prefetching'), total=revcount)
             progress.update(0)
             for rev in sorted(revs):
                 ctx = repo[rev]
                 if pats:
                     m = scmutil.match(ctx, pats, opts)
                 sparsematch = repo.maybesparsematch(rev)
 
                 mfnode = ctx.manifestnode()
                 mfrev = mfrevlog.rev(mfnode)
 
                 # Decompressing manifests is expensive.
                 # When possible, only read the deltas.
                 p1, p2 = mfrevlog.parentrevs(mfrev)
                 if p1 in visited and p2 in visited:
                     mfdict = mfl[mfnode].readfast()
                 else:
                     mfdict = mfl[mfnode].read()
 
                 diff = mfdict.iteritems()
                 if pats:
                     diff = (pf for pf in diff if m(pf[0]))
                 if sparsematch:
                     diff = (pf for pf in diff if sparsematch(pf[0]))
                 if rev not in localrevs:
                     serverfiles.update(diff)
                 else:
                     files.update(diff)
 
                 visited.add(mfrev)
                 progress.increment()
 
             files.difference_update(skip)
             serverfiles.difference_update(skip)
             progress.complete()
 
             # Fetch files known to be on the server
             if serverfiles:
                 results = [(path, hex(fnode)) for (path, fnode) in serverfiles]
                 repo.fileservice.prefetch(results, force=True)
 
             # Fetch files that may or may not be on the server
             if files:
                 results = [(path, hex(fnode)) for (path, fnode) in files]
                 repo.fileservice.prefetch(results)
 
         def close(self):
             super(shallowrepository, self).close()
             self.connectionpool.close()
 
     repo.__class__ = shallowrepository
 
     repo.shallowmatch = match.always()
 
     makeunionstores(repo)
 
     repo.includepattern = repo.ui.configlist("remotefilelog", "includepattern",
                                              None)
     repo.excludepattern = repo.ui.configlist("remotefilelog", "excludepattern",
                                              None)
     if not util.safehasattr(repo, 'connectionpool'):
         repo.connectionpool = connectionpool.connectionpool(repo)
 
     if repo.includepattern or repo.excludepattern:
         repo.shallowmatch = match.match(repo.root, '', None,
             repo.includepattern, repo.excludepattern)