lfs: autoload the extension when cloning from repo with lfs enabled...
Matt Harbison
r40360:6637b079 default
@@ -1,407 +1,413 @@ hgext/lfs/wrapper.py
# wrapper.py - methods wrapping core mercurial logic
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid, short

from mercurial import (
    error,
    repository,
    revlog,
    util,
)

from mercurial.utils import (
    storageutil,
    stringutil,
)

from ..largefiles import lfutil

from . import (
    blobstore,
    pointer,
)

def localrepomakefilestorage(orig, requirements, features, **kwargs):
    if b'lfs' in requirements:
        features.add(repository.REPO_FEATURE_LFS)

    return orig(requirements=requirements, features=features, **kwargs)

def allsupportedversions(orig, ui):
    versions = orig(ui)
    versions.add('03')
    return versions

def _capabilities(orig, repo, proto):
    '''Wrap server command to announce lfs server capability'''
    caps = orig(repo, proto)
    if util.safehasattr(repo.svfs, 'lfslocalblobstore'):
-        # XXX: change to 'lfs=serve' when separate git server isn't required?
+        # Advertise a slightly different capability when lfs is *required*, so
+        # that the client knows it MUST load the extension. If lfs is not
+        # required on the server, there's no reason to autoload the extension
+        # on the client.
+        if b'lfs' in repo.requirements:
+            caps.append('lfs-serve')
+
        caps.append('lfs')
    return caps

def bypasscheckhash(self, text):
    return False

def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 2-tuple (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        p.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith('x-hg-'):
            name = k[len('x-hg-'):]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith('\1\n'):
        text = storageutil.packmeta(hgmeta, text)

    return (text, True)

def writetostore(self, text):
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = storageutil.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hex(hashlib.sha256(text).digest())
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = 'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size='%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not stringutil.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)

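# Illustrative example (editor's sketch, not part of this diff): the pointer
# that writetostore() above serializes into the filelog follows the git-lfs
# pointer format, e.g. for a 12345-byte blob:
#
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<64 hex digits of the blob's sha256>
#   size 12345
#
# plus any 'x-hg-*' filelog metadata and the 'x-is-binary' key added above.
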
def _islfs(rlog, node=None, rev=None):
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog._revlog.rev(node)
    else:
        node = rlog._revlog.node(rev)
    if node == nullid:
        return False
    flags = rlog._revlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)

def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
    # The matcher isn't available if reposetup() wasn't called.
    lfstrack = self._revlog.opener.options.get('lfstrack')

    if lfstrack:
        textlen = len(text)
        # exclude hg rename meta from file size
        meta, offset = storageutil.parsemeta(text)
        if offset:
            textlen -= offset

        if lfstrack(self._revlog.filename, textlen):
            flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)

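# Illustrative note (assumed config, not part of this diff): 'lfstrack' is
# the compiled [lfs] track= fileset from the configuration, so e.g.
#
#   [lfs]
#   track = size(">10MB")
#
# flags any file larger than 10MB with REVIDX_EXTSTORED here.
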
def filelogrenamed(orig, self, node):
    if _islfs(self, node):
        rawtext = self._revlog.revision(node, raw=True)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
            return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
        else:
            return False
    return orig(self, node)

def filelogsize(orig, self, rev):
    if _islfs(self, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self._revlog.revision(rev, raw=True)
        metadata = pointer.deserialize(rawtext)
        return int(metadata['size'])
    return orig(self, rev)

def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: check LFS oid
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)

def filectxisbinary(orig, self):
    if self.islfs():
        # fast path: use lfs metadata to answer isbinary
        metadata = pointer.deserialize(self.rawdata())
        # if lfs metadata says nothing, assume it's binary by default
        return bool(int(metadata.get('x-is-binary', 1)))
    return orig(self)

def filectxislfs(self):
    return _islfs(self.filelog(), self.filenode())

def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
    orig(fm, ctx, matcher, path, decode)
    fm.data(rawdata=ctx[path].rawdata())

def convertsink(orig, sink):
    sink = orig(sink)
    if sink.repotype == 'hg':
        class lfssink(sink.__class__):
            def putcommit(self, files, copies, parents, commit, source, revmap,
                          full, cleanp2):
                pc = super(lfssink, self).putcommit
                node = pc(files, copies, parents, commit, source, revmap, full,
                          cleanp2)

                if 'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add('lfs')
                        self.repo._writerequirements()

                        # Permanently enable lfs locally
                        self.repo.vfs.append(
                            'hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))

                return node

        sink.__class__ = lfssink

    return sink

def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith('lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))

def _prefetchfiles(repo, revs, match):
    """Ensure that required LFS blobs are present, fetching them as a group if
    needed."""
    if not util.safehasattr(repo.svfs, 'lfslocalblobstore'):
        return

    pointers = []
    oids = set()
    localstore = repo.svfs.lfslocalblobstore

    for rev in revs:
        ctx = repo[rev]
        for f in ctx.walk(match):
            p = pointerfromctx(ctx, f)
            if p and p.oid() not in oids and not localstore.has(p.oid()):
                p.filename = f
                pointers.append(p)
                oids.add(p.oid())

    if pointers:
        # Recalculating the repo store here allows 'paths.default' that is set
        # on the repo by a clone command to be used for the update.
        blobstore.remote(repo).readbatch(pointers, localstore)

def _canskipupload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
        return True

    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def candownload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
        return False

    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions, e.g. infinitepush. Avoid renaming.
    '''
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)

def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)

def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed, and
    update the remote store based on the destination path."""
    if 'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable('lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _("required features are not supported in the destination: %s")
            raise error.Abort(m % 'lfs',
                              hint=_('enable the lfs extension on the server'))

        # Repositories where this extension is disabled won't have the field.
        # But if there's a requirement, then the extension must be loaded AND
        # there may be blobs to push.
        remotestore = repo.svfs.lfsremoteblobstore
        try:
            repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
            return orig(repo, remote, *args, **kwargs)
        finally:
            repo.svfs.lfsremoteblobstore = remotestore
    else:
        return orig(repo, remote, *args, **kwargs)

def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(ui, repo, source, filename, bundletype, outgoing, *args,
                **kwargs)

def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug('lfs: computing set of blobs to upload\n')
    pointers = {}

    makeprogress = repo.ui.makeprogress
    with makeprogress(_('lfs search'), _('changesets'), len(revs)) as progress:
        for r in revs:
            ctx = repo[r]
            for p in pointersfromctx(ctx).values():
                pointers[p.oid()] = p
            progress.increment()
    return sorted(pointers.values(), key=lambda p: p.oid())

def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish from a non-LFS
    file, this pointer is represented by an empty dict.
    """
    _ctx = ctx
    if f not in ctx:
        if not removed:
            return None
        if f in ctx.p1():
            _ctx = ctx.p1()
        elif f in ctx.p2():
            _ctx = ctx.p2()
        else:
            return None
    fctx = _ctx[f]
    if not _islfs(fctx.filelog(), fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        if ctx == _ctx:
            return p
        return {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
                          % (f, short(_ctx.node()), ex))

def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    result = {}
    for f in ctx.files():
        p = pointerfromctx(ctx, f, removed=removed)
        if p is not None:
            result[f] = p
    return result

def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)

def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup()
    if (util.safehasattr(srcrepo.svfs, 'lfslocalblobstore') and
        util.safehasattr(dstrepo.svfs, 'lfslocalblobstore')):
        srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
        dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

        for dirpath, dirs, files in srclfsvfs.walk():
            for oid in files:
                ui.write(_('copying lfs blob %s\n') % oid)
                lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))

def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if 'lfs' in repo.requirements:
        reqs.add('lfs')
    return reqs
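The second file in this changeset, mercurial/hg.py, appears below as unchanged context; the hunk that actually consumes the new 'lfs-serve' capability on the client falls outside this excerpt. As a rough sketch of the client-side idea only (a hypothetical helper, not the commit's actual code), the autoload amounts to checking the peer's capability and loading the extension before any data is exchanged:

from mercurial import extensions

def _autoloadlfs(ui, srcpeer):
    # 'lfs-serve' is only advertised when the server repository *requires*
    # lfs (see the _capabilities() hunk above), so seeing it means this
    # client cannot interoperate without the extension loaded.
    if srcpeer.capable('lfs-serve'):
        # enable the extension for this invocation, then load it
        ui.setconfig('extensions', 'lfs', '', source='autoload')
        extensions.loadall(ui, whitelist=['lfs'])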
@@ -1,1208 +1,1225 @@ mercurial/hg.py
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    narrowspec,
    node,
    phases,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])

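# Illustrative example (editor's sketch, not part of this diff): a URL
# fragment names the branch, so
#   parseurl(b'https://example.com/repo#stable')
# returns
#   (b'https://example.com/repo', (b'stable', []))
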
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    ui = getattr(obj, "ui", ui)
    if ui.configbool('devel', 'debug.extensions'):
        log = lambda msg, *values: ui.debug('debug.extensions: ',
                                            msg % values,
                                            label='debug.extensions')
    else:
        log = lambda *a, **kw: None
    for f in presetupfuncs or []:
        f(ui, obj)
    log('- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            log(' - running reposetup for %s\n' % (name,))
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                log(' > reposetup for %r took %s\n', name, stats)
    log('> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo

def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

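# Illustrative usage (assumed, not part of this diff): pooled storage clones
# are normally reached through the share extension's config, e.g.
#
#   hg --config extensions.share= --config share.pool=$HOME/.hgpool clone URL
#
# which makes clone() below delegate to clonewithshare() above.
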
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None,
          storeincludepats=None, storeexcludepats=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
527 will be included (a "full" clone). Otherwise a "narrow" clone containing
528 only the requested files will be performed. If ``storeincludepats`` is not
528 only the requested files will be performed. If ``storeincludepats`` is not
529 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
529 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
530 ``path:.``. If both are empty sets, no files will be cloned.
530 ``path:.``. If both are empty sets, no files will be cloned.
531 """
531 """
532
532
533 if isinstance(source, bytes):
533 if isinstance(source, bytes):
534 origsource = ui.expandpath(source)
534 origsource = ui.expandpath(source)
535 source, branches = parseurl(origsource, branch)
535 source, branches = parseurl(origsource, branch)
536 srcpeer = peer(ui, peeropts, source)
536 srcpeer = peer(ui, peeropts, source)
537 else:
537 else:
538 srcpeer = source.peer() # in case we were called with a localrepo
538 srcpeer = source.peer() # in case we were called with a localrepo
539 branches = (None, branch or [])
539 branches = (None, branch or [])
540 origsource = source = srcpeer.url()
540 origsource = source = srcpeer.url()
541 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
541 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
542
542
543 if dest is None:
543 if dest is None:
544 dest = defaultdest(source)
544 dest = defaultdest(source)
545 if dest:
545 if dest:
546 ui.status(_("destination directory: %s\n") % dest)
546 ui.status(_("destination directory: %s\n") % dest)
547 else:
547 else:
548 dest = ui.expandpath(dest)
548 dest = ui.expandpath(dest)
549
549
550 dest = util.urllocalpath(dest)
550 dest = util.urllocalpath(dest)
551 source = util.urllocalpath(source)
551 source = util.urllocalpath(source)
552
552
553 if not dest:
553 if not dest:
554 raise error.Abort(_("empty destination path is not valid"))
554 raise error.Abort(_("empty destination path is not valid"))
555
555
556 destvfs = vfsmod.vfs(dest, expandpath=True)
556 destvfs = vfsmod.vfs(dest, expandpath=True)
557 if destvfs.lexists():
557 if destvfs.lexists():
558 if not destvfs.isdir():
558 if not destvfs.isdir():
559 raise error.Abort(_("destination '%s' already exists") % dest)
559 raise error.Abort(_("destination '%s' already exists") % dest)
560 elif destvfs.listdir():
560 elif destvfs.listdir():
561 raise error.Abort(_("destination '%s' is not empty") % dest)
561 raise error.Abort(_("destination '%s' is not empty") % dest)
562
562
563 createopts = {}
563 createopts = {}
564 narrow = False
564 narrow = False
565
565
566 if storeincludepats is not None:
566 if storeincludepats is not None:
567 narrowspec.validatepatterns(storeincludepats)
567 narrowspec.validatepatterns(storeincludepats)
568 narrow = True
568 narrow = True
569
569
570 if storeexcludepats is not None:
570 if storeexcludepats is not None:
571 narrowspec.validatepatterns(storeexcludepats)
571 narrowspec.validatepatterns(storeexcludepats)
572 narrow = True
572 narrow = True
573
573
574 if narrow:
574 if narrow:
575 # Include everything by default if only exclusion patterns defined.
575 # Include everything by default if only exclusion patterns defined.
576 if storeexcludepats and not storeincludepats:
576 if storeexcludepats and not storeincludepats:
577 storeincludepats = {'path:.'}
577 storeincludepats = {'path:.'}
578
578
579 createopts['narrowfiles'] = True
579 createopts['narrowfiles'] = True
580
580
581 if srcpeer.capable(b'lfs-serve'):
582 # Repository creation honors the config if it disabled the extension, so
583 # we can't just announce that lfs will be enabled. This check avoids
584 # saying that lfs will be enabled, and then saying it's an unknown
585 # feature. The lfs creation option is set in either case so that a
586 # requirement is added. If the extension is explicitly disabled but the
587 # requirement is set, the clone aborts early, before transferring any
588 # data.
589 createopts['lfs'] = True
590
591 if extensions.disabledext('lfs'):
592 ui.status(_('(remote is using large file support (lfs), but it is '
593 'explicitly disabled in the local configuration)\n'))
594 else:
595 ui.status(_('(remote is using large file support (lfs); lfs will '
596 'be enabled for this repository)\n'))
597
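# For illustration (user configuration assumed, not part of this change): a
# user who has disabled the extension explicitly, e.g. with an hgrc entry
#
#     [extensions]
#     lfs = !
#
# still gets the 'lfs' requirement recorded via createopts above, so the
# clone aborts before any data is transferred instead of producing a
# repository this client cannot read.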
598 shareopts = shareopts or {}
599 sharepool = shareopts.get('pool')
600 sharenamemode = shareopts.get('mode')
601 if sharepool and islocal(dest):
602 sharepath = None
603 if sharenamemode == 'identity':
604 # Resolve the name from the initial changeset in the remote
605 # repository. This returns nullid when the remote is empty. It
606 # raises RepoLookupError if revision 0 is filtered or otherwise
607 # not available. If we fail to resolve, sharing is not enabled.
608 try:
609 with srcpeer.commandexecutor() as e:
610 rootnode = e.callcommand('lookup', {
611 'key': '0',
612 }).result()
613
614 if rootnode != node.nullid:
615 sharepath = os.path.join(sharepool, node.hex(rootnode))
616 else:
617 ui.status(_('(not using pooled storage: '
618 'remote appears to be empty)\n'))
619 except error.RepoLookupError:
620 ui.status(_('(not using pooled storage: '
621 'unable to resolve identity of remote)\n'))
622 elif sharenamemode == 'remote':
623 sharepath = os.path.join(
624 sharepool, node.hex(hashlib.sha1(source).digest()))
625 else:
626 raise error.Abort(_('unknown share naming mode: %s') %
627 sharenamemode)
628
629 # TODO this is a somewhat arbitrary restriction.
630 if narrow:
631 ui.status(_('(pooled storage not supported for narrow clones)\n'))
632 sharepath = None
633
634 if sharepath:
635 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
636 dest, pull=pull, rev=revs, update=update,
637 stream=stream)
638
639 srclock = destlock = cleandir = None
640 srcrepo = srcpeer.local()
641 try:
642 abspath = origsource
643 if islocal(origsource):
644 abspath = os.path.abspath(util.urllocalpath(origsource))
645
646 if islocal(dest):
647 cleandir = dest
648
649 copy = False
650 if (srcrepo and srcrepo.cancopy() and islocal(dest)
651 and not phases.hassecret(srcrepo)):
652 copy = not pull and not revs
653
654 # TODO this is a somewhat arbitrary restriction.
655 if narrow:
656 copy = False
657
658 if copy:
659 try:
660 # we use a lock here because if we race with commit, we
661 # can end up with extra data in the cloned revlogs that's
662 # not pointed to by changesets, thus causing verify to
663 # fail
664 srclock = srcrepo.lock(wait=False)
665 except error.LockError:
666 copy = False
667
668 if copy:
669 srcrepo.hook('preoutgoing', throw=True, source='clone')
670 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
671 if not os.path.exists(dest):
672 util.makedirs(dest)
673 else:
674 # only clean up directories we create ourselves
675 cleandir = hgdir
676 try:
677 destpath = hgdir
678 util.makedir(destpath, notindexed=True)
679 except OSError as inst:
680 if inst.errno == errno.EEXIST:
681 cleandir = None
682 raise error.Abort(_("destination '%s' already exists")
683 % dest)
684 raise
685
686 destlock = copystore(ui, srcrepo, destpath)
687 # copy bookmarks over
688 srcbookmarks = srcrepo.vfs.join('bookmarks')
689 dstbookmarks = os.path.join(destpath, 'bookmarks')
690 if os.path.exists(srcbookmarks):
691 util.copyfile(srcbookmarks, dstbookmarks)
692
693 dstcachedir = os.path.join(destpath, 'cache')
694 for cache in cacheutil.cachetocopy(srcrepo):
695 _copycache(srcrepo, dstcachedir, cache)
696
697 # we need to re-init the repo after manually copying the data
698 # into it
699 destpeer = peer(srcrepo, peeropts, dest)
700 srcrepo.hook('outgoing', source='clone',
701 node=node.hex(node.nullid))
702 else:
703 try:
704 # only pass ui when no srcrepo
705 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
706 createopts=createopts)
707 except OSError as inst:
708 if inst.errno == errno.EEXIST:
709 cleandir = None
710 raise error.Abort(_("destination '%s' already exists")
711 % dest)
712 raise
713
714 if revs:
715 if not srcpeer.capable('lookup'):
716 raise error.Abort(_("src repository does not support "
717 "revision lookup and so doesn't "
718 "support clone by revision"))
719
720 # TODO this is batchable.
721 remoterevs = []
722 for rev in revs:
723 with srcpeer.commandexecutor() as e:
724 remoterevs.append(e.callcommand('lookup', {
725 'key': rev,
726 }).result())
727 revs = remoterevs
728
729 checkout = revs[0]
730 else:
731 revs = None
732 local = destpeer.local()
733 if local:
734 if narrow:
735 with local.lock():
736 local.setnarrowpats(storeincludepats, storeexcludepats)
737
738 u = util.url(abspath)
739 defaulturl = bytes(u)
740 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
741 if not stream:
742 if pull:
743 stream = False
744 else:
745 stream = None
746 # internal config: ui.quietbookmarkmove
747 overrides = {('ui', 'quietbookmarkmove'): True}
748 with local.ui.configoverride(overrides, 'clone'):
749 exchange.pull(local, srcpeer, revs,
750 streamclonerequested=stream,
751 includepats=storeincludepats,
752 excludepats=storeexcludepats)
753 elif srcrepo:
754 # TODO lift restriction once exchange.push() accepts narrow
755 # push.
756 if narrow:
757 raise error.Abort(_('narrow clone not available for '
758 'remote destinations'))
759
760 exchange.push(srcrepo, destpeer, revs=revs,
761 bookmarks=srcrepo._bookmarks.keys())
762 else:
763 raise error.Abort(_("clone from remote to remote not supported")
764 )
765
766 cleandir = None
767
768 destrepo = destpeer.local()
769 if destrepo:
770 template = uimod.samplehgrcs['cloned']
771 u = util.url(abspath)
772 u.passwd = None
773 defaulturl = bytes(u)
774 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
775 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
776
777 if ui.configbool('experimental', 'remotenames'):
778 logexchange.pullremotenames(destrepo, srcpeer)
779
780 if update:
781 if update is not True:
782 with srcpeer.commandexecutor() as e:
783 checkout = e.callcommand('lookup', {
784 'key': update,
785 }).result()
786
787 uprev = None
788 status = None
789 if checkout is not None:
790 # Some extensions (at least hg-git and hg-subversion) have
791 # a peer.lookup() implementation that returns a name instead
792 # of a nodeid. We work around it here until we've figured
793 # out a better solution.
794 if len(checkout) == 20 and checkout in destrepo:
795 uprev = checkout
796 elif scmutil.isrevsymbol(destrepo, checkout):
797 uprev = scmutil.revsymbol(destrepo, checkout).node()
798 else:
799 if update is not True:
800 try:
801 uprev = destrepo.lookup(update)
802 except error.RepoLookupError:
803 pass
804 if uprev is None:
805 try:
806 uprev = destrepo._bookmarks['@']
807 update = '@'
808 bn = destrepo[uprev].branch()
809 if bn == 'default':
810 status = _("updating to bookmark @\n")
811 else:
812 status = (_("updating to bookmark @ on branch %s\n")
813 % bn)
814 except KeyError:
815 try:
816 uprev = destrepo.branchtip('default')
817 except error.RepoLookupError:
818 uprev = destrepo.lookup('tip')
819 if not status:
820 bn = destrepo[uprev].branch()
821 status = _("updating to branch %s\n") % bn
822 destrepo.ui.status(status)
823 _update(destrepo, uprev)
824 if update in destrepo._bookmarks:
825 bookmarks.activate(destrepo, update)
826 finally:
827 release(srclock, destlock)
828 if cleandir is not None:
829 shutil.rmtree(cleandir, True)
830 if srcpeer is not None:
831 srcpeer.close()
832 return srcpeer, destpeer
833
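A minimal usage sketch of the API documented above, assuming this module is
available as mercurial.hg and that the remote URL and pool directory are
placeholders:

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()
    # Clone into pooled storage; with mode 'identity' (the default) the
    # shared store directory is named after the root changeset's node.
    srcpeer, destpeer = hg.clone(u, {}, b'https://example.com/repo',
                                 dest=b'repo-copy',
                                 shareopts={'pool': b'/srv/hg-pool',
                                            'mode': 'identity'})
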
834 def _showstats(repo, stats, quietempty=False):
835 if quietempty and stats.isempty():
836 return
837 repo.ui.status(_("%d files updated, %d files merged, "
838 "%d files removed, %d files unresolved\n") % (
839 stats.updatedcount, stats.mergedcount,
840 stats.removedcount, stats.unresolvedcount))
841
842 def updaterepo(repo, node, overwrite, updatecheck=None):
843 """Update the working directory to node.
844
845 When overwrite is set, changes are clobbered; otherwise they are merged.
846
847 returns stats (see pydoc mercurial.merge.applyupdates)"""
848 return mergemod.update(repo, node, False, overwrite,
849 labels=['working copy', 'destination'],
850 updatecheck=updatecheck)
851
852 def update(repo, node, quietempty=False, updatecheck=None):
853 """update the working directory to node"""
854 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
855 _showstats(repo, stats, quietempty)
856 if stats.unresolvedcount:
857 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
858 return stats.unresolvedcount > 0
859
860 # naming conflict in clone()
861 _update = update
862
863 def clean(repo, node, show_stats=True, quietempty=False):
864 """forcibly switch the working directory to node, clobbering changes"""
865 stats = updaterepo(repo, node, True)
866 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
867 if show_stats:
868 _showstats(repo, stats, quietempty)
869 return stats.unresolvedcount > 0
870
871 # naming conflict in updatetotally()
872 _clean = clean
873
874 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
875 """Update the working directory with extra care for non-file components
876
877 This takes care of non-file components below:
878
879 :bookmark: might be advanced or (in)activated
880
881 This takes arguments below:
882
883 :checkout: to which revision the working directory is updated
884 :brev: a name, which might be a bookmark to be activated after updating
885 :clean: whether changes in the working directory can be discarded
886 :updatecheck: how to deal with a dirty working directory
887
888 Valid values for updatecheck are (None => linear):
889
890 * abort: abort if the working directory is dirty
891 * none: don't check (merge working directory changes into destination)
892 * linear: check that update is linear before merging working directory
893 changes into destination
894 * noconflict: check that the update does not result in file merges
895
896 This returns whether a conflict was detected during the update.
897 """
898 if updatecheck is None:
899 updatecheck = ui.config('commands', 'update.check')
900 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
901 # If not configured, or invalid value configured
902 updatecheck = 'linear'
903 with repo.wlock():
904 movemarkfrom = None
905 warndest = False
906 if checkout is None:
907 updata = destutil.destupdate(repo, clean=clean)
908 checkout, movemarkfrom, brev = updata
909 warndest = True
910
911 if clean:
912 ret = _clean(repo, checkout)
913 else:
914 if updatecheck == 'abort':
915 cmdutil.bailifchanged(repo, merge=False)
916 updatecheck = 'none'
917 ret = _update(repo, checkout, updatecheck=updatecheck)
918
919 if not ret and movemarkfrom:
920 if movemarkfrom == repo['.'].node():
921 pass # no-op update
922 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
923 b = ui.label(repo._activebookmark, 'bookmarks.active')
924 ui.status(_("updating bookmark %s\n") % b)
925 else:
926 # this can happen with a non-linear update
927 b = ui.label(repo._activebookmark, 'bookmarks')
928 ui.status(_("(leaving bookmark %s)\n") % b)
929 bookmarks.deactivate(repo)
930 elif brev in repo._bookmarks:
931 if brev != repo._activebookmark:
932 b = ui.label(brev, 'bookmarks.active')
933 ui.status(_("(activating bookmark %s)\n") % b)
934 bookmarks.activate(repo, brev)
935 elif brev:
936 if repo._activebookmark:
937 b = ui.label(repo._activebookmark, 'bookmarks')
938 ui.status(_("(leaving bookmark %s)\n") % b)
939 bookmarks.deactivate(repo)
940
941 if warndest:
942 destutil.statusotherdests(ui, repo)
943
944 return ret
945
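For illustration, the commands.update.check knob read above can also be set
programmatically; a sketch, assuming a ui instance named u:

    # Same effect as an hgrc containing:
    #   [commands]
    #   update.check = noconflict
    u.setconfig(b'commands', b'update.check', b'noconflict')
    # updatetotally() will now refuse updates that would require file merges.
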
946 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
947 abort=False):
948 """Branch merge with node, resolving changes. Return true if any
949 unresolved conflicts."""
950 if not abort:
951 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
952 labels=labels)
953 else:
954 ms = mergemod.mergestate.read(repo)
955 if ms.active():
956 # there were conflicts
957 node = ms.localctx.hex()
958 else:
959 # there were no conflicts, mergestate was not stored
960 node = repo['.'].hex()
961
962 repo.ui.status(_("aborting the merge, updating back to"
963 " %s\n") % node[:12])
964 stats = mergemod.update(repo, node, branchmerge=False, force=True,
965 labels=labels)
966
967 _showstats(repo, stats)
968 if stats.unresolvedcount:
969 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
970 "or 'hg merge --abort' to abandon\n"))
971 elif remind and not abort:
972 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
973 return stats.unresolvedcount > 0
974
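A sketch of the abort path above, assuming repo is an open localrepository
with an interrupted merge; the node argument is recomputed from the
mergestate when abort=True, so None is acceptable here:

    hadconflicts = merge(repo, None, abort=True)
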
975 def _incoming(displaychlist, subreporecurse, ui, repo, source,
976 opts, buffered=False):
977 """
978 Helper for incoming / gincoming.
979 displaychlist gets called with
980 (remoterepo, incomingchangesetlist, displayer) parameters,
981 and is supposed to contain only code that can't be unified.
982 """
983 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
984 other = peer(repo, opts, source)
985 ui.status(_('comparing with %s\n') % util.hidepassword(source))
986 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
987
988 if revs:
989 revs = [other.lookup(rev) for rev in revs]
990 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
991 revs, opts["bundle"], opts["force"])
992 try:
993 if not chlist:
994 ui.status(_("no changes found\n"))
995 return subreporecurse()
996 ui.pager('incoming')
997 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
998 buffered=buffered)
999 displaychlist(other, chlist, displayer)
1000 displayer.close()
1001 finally:
1002 cleanupfn()
1003 subreporecurse()
1004 return 0 # exit code is zero since we found incoming changes
1005
1006 def incoming(ui, repo, source, opts):
1007 def subreporecurse():
1008 ret = 1
1009 if opts.get('subrepos'):
1010 ctx = repo[None]
1011 for subpath in sorted(ctx.substate):
1012 sub = ctx.sub(subpath)
1013 ret = min(ret, sub.incoming(ui, source, opts))
1014 return ret
1015
1016 def display(other, chlist, displayer):
1017 limit = logcmdutil.getlimit(opts)
1018 if opts.get('newest_first'):
1019 chlist.reverse()
1020 count = 0
1021 for n in chlist:
1022 if limit is not None and count >= limit:
1023 break
1024 parents = [p for p in other.changelog.parents(n) if p != nullid]
1025 if opts.get('no_merges') and len(parents) == 2:
1026 continue
1027 count += 1
1028 displayer.show(other[n])
1029 return _incoming(display, subreporecurse, ui, repo, source, opts)
1030
1031 def _outgoing(ui, repo, dest, opts):
1032 path = ui.paths.getpath(dest, default=('default-push', 'default'))
1033 if not path:
1034 raise error.Abort(_('default repository not configured!'),
1035 hint=_("see 'hg help config.paths'"))
1036 dest = path.pushloc or path.loc
1037 branches = path.branch, opts.get('branch') or []
1038
1039 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
1040 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
1041 if revs:
1042 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1043
1044 other = peer(repo, opts, dest)
1045 outgoing = discovery.findcommonoutgoing(repo, other, revs,
1046 force=opts.get('force'))
1047 o = outgoing.missing
1048 if not o:
1049 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1050 return o, other
1051
1052 def outgoing(ui, repo, dest, opts):
1053 def recurse():
1054 ret = 1
1055 if opts.get('subrepos'):
1056 ctx = repo[None]
1057 for subpath in sorted(ctx.substate):
1058 sub = ctx.sub(subpath)
1059 ret = min(ret, sub.outgoing(ui, dest, opts))
1060 return ret
1061
1062 limit = logcmdutil.getlimit(opts)
1063 o, other = _outgoing(ui, repo, dest, opts)
1064 if not o:
1065 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1066 return recurse()
1067
1068 if opts.get('newest_first'):
1069 o.reverse()
1070 ui.pager('outgoing')
1071 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1072 count = 0
1073 for n in o:
1074 if limit is not None and count >= limit:
1075 break
1076 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1077 if opts.get('no_merges') and len(parents) == 2:
1078 continue
1079 count += 1
1080 displayer.show(repo[n])
1081 displayer.close()
1082 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1083 recurse()
1084 return 0 # exit code is zero since we found outgoing changes
1085
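A minimal sketch of driving incoming() directly; u and repo are assumed, the
opts keys mirror the command-line flags consulted above, and 'default' names
a configured path:

    opts = {'bundle': None, 'force': False, 'branch': None, 'rev': [],
            'newest_first': True, 'no_merges': False, 'subrepos': False}
    ret = incoming(u, repo, b'default', opts)  # 0 if changes were found
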
1086 def verify(repo):
1087 """verify the consistency of a repository"""
1088 ret = verifymod.verify(repo)
1089
1090 # Broken subrepo references in hidden csets don't seem worth worrying about,
1091 # since they can't be pushed/pulled, and --hidden can be used if they are a
1092 # concern.
1093
1094 # pathto() is needed for -R case
1095 revs = repo.revs("filelog(%s)",
1096 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1097
1098 if revs:
1099 repo.ui.status(_('checking subrepo links\n'))
1100 for rev in revs:
1101 ctx = repo[rev]
1102 try:
1103 for subpath in ctx.substate:
1104 try:
1105 ret = (ctx.sub(subpath, allowcreate=False).verify()
1106 or ret)
1107 except error.RepoError as e:
1108 repo.ui.warn(('%d: %s\n') % (rev, e))
1109 except Exception:
1110 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1111 node.short(ctx.node()))
1112
1113 return ret
1114
1115 def remoteui(src, opts):
1116 'build a remote ui from ui or repo and opts'
1117 if util.safehasattr(src, 'baseui'): # looks like a repository
1118 dst = src.baseui.copy() # drop repo-specific config
1119 src = src.ui # copy target options from repo
1120 else: # assume it's a global ui object
1121 dst = src.copy() # keep all global options
1122
1123 # copy ssh-specific options
1124 for o in 'ssh', 'remotecmd':
1125 v = opts.get(o) or src.config('ui', o)
1126 if v:
1127 dst.setconfig("ui", o, v, 'copied')
1128
1129 # copy bundle-specific options
1130 r = src.config('bundle', 'mainreporoot')
1131 if r:
1132 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1133
1134 # copy selected local settings to the remote ui
1135 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1136 for key, val in src.configitems(sect):
1137 dst.setconfig(sect, key, val, 'copied')
1138 v = src.config('web', 'cacerts')
1139 if v:
1140 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1141
1142 return dst
1143
1144 # Files of interest
1145 # Used to check if the repository has changed looking at mtime and size of
1146 # these files.
1147 foi = [('spath', '00changelog.i'),
1148 ('spath', 'phaseroots'), # ! phase can change content at the same size
1149 ('spath', 'obsstore'),
1150 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1151 ]
1152
1153 class cachedlocalrepo(object):
1154 """Holds a localrepository that can be cached and reused."""
1155
1156 def __init__(self, repo):
1157 """Create a new cached repo from an existing repo.
1158
1159 We assume the passed in repo was recently created. If the
1160 repo has changed between when it was created and when it was
1161 turned into a cache, it may not refresh properly.
1162 """
1163 assert isinstance(repo, localrepo.localrepository)
1164 self._repo = repo
1165 self._state, self.mtime = self._repostate()
1166 self._filtername = repo.filtername
1167
1168 def fetch(self):
1169 """Refresh (if necessary) and return a repository.
1170
1171 If the cached instance is out of date, it will be recreated
1172 automatically and returned.
1173
1174 Returns a tuple of the repo and a boolean indicating whether a new
1175 repo instance was created.
1176 """
1177 # We compare the mtimes and sizes of some well-known files to
1178 # determine if the repo changed. This is not precise, as mtimes
1179 # are susceptible to clock skew and imprecise filesystems and
1180 # file content can change while maintaining the same size.
1181
1182 state, mtime = self._repostate()
1183 if state == self._state:
1184 return self._repo, False
1185
1186 repo = repository(self._repo.baseui, self._repo.url())
1187 if self._filtername:
1188 self._repo = repo.filtered(self._filtername)
1189 else:
1190 self._repo = repo.unfiltered()
1191 self._state = state
1192 self.mtime = mtime
1193
1194 return self._repo, True
1195
1196 def _repostate(self):
1197 state = []
1198 maxmtime = -1
1199 for attr, fname in foi:
1200 prefix = getattr(self._repo, attr)
1201 p = os.path.join(prefix, fname)
1202 try:
1203 st = os.stat(p)
1204 except OSError:
1205 st = os.stat(prefix)
1206 state.append((st[stat.ST_MTIME], st.st_size))
1207 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1208
1209 return tuple(state), maxmtime
1210
1211 def copy(self):
1212 """Obtain a copy of this class instance.
1213
1214 A new localrepository instance is obtained. The new instance should be
1215 completely independent of the original.
1216 """
1217 repo = repository(self._repo.baseui, self._repo.origroot)
1218 if self._filtername:
1219 repo = repo.filtered(self._filtername)
1220 else:
1221 repo = repo.unfiltered()
1222 c = cachedlocalrepo(repo)
1223 c._state = self._state
1224 c.mtime = self.mtime
1225 return c
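
A minimal sketch of the caching pattern, assuming a local repository path;
hgweb uses this class in essentially the same way to reuse repository
instances across requests:

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()
    cached = hg.cachedlocalrepo(hg.repository(u, b'/path/to/repo'))

    # Later (e.g. per request): fetch() re-stats the files of interest and
    # only reopens the repository when one of them changed.
    repo, recreated = cached.fetch()
    if recreated:
        pass  # state derived from the previous instance is now stale
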
@@ -1,3032 +1,3039 b''
1 # localrepo.py - read/write repository class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import errno
11 import hashlib
12 import os
13 import random
14 import sys
15 import time
16 import weakref
17
18 from .i18n import _
19 from .node import (
20 bin,
21 hex,
22 nullid,
23 nullrev,
24 short,
25 )
26 from . import (
27 bookmarks,
28 branchmap,
29 bundle2,
30 changegroup,
31 changelog,
32 color,
33 context,
34 dirstate,
35 dirstateguard,
36 discovery,
37 encoding,
38 error,
39 exchange,
40 extensions,
41 filelog,
42 hook,
43 lock as lockmod,
44 manifest,
45 match as matchmod,
46 merge as mergemod,
47 mergeutil,
48 namespaces,
49 narrowspec,
50 obsolete,
51 pathutil,
52 phases,
53 pushkey,
54 pycompat,
55 repository,
56 repoview,
57 revset,
58 revsetlang,
59 scmutil,
60 sparse,
61 store as storemod,
62 subrepoutil,
63 tags as tagsmod,
64 transaction,
65 txnutil,
66 util,
67 vfs as vfsmod,
68 )
69 from .utils import (
70 interfaceutil,
71 procutil,
72 stringutil,
73 )
74
75 from .revlogutils import (
76 constants as revlogconst,
77 )
78
79 release = lockmod.release
80 urlerr = util.urlerr
81 urlreq = util.urlreq
82
83 # set of (path, vfs-location) tuples. vfs-location is:
84 # - 'plain' for vfs relative paths
85 # - '' for svfs relative paths
86 _cachedfiles = set()
87
88 class _basefilecache(scmutil.filecache):
89 """All filecache usage on repo is done for logic that should be unfiltered
90 """
91 def __get__(self, repo, type=None):
92 if repo is None:
93 return self
94 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
95 def __set__(self, repo, value):
96 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
97 def __delete__(self, repo):
98 return super(_basefilecache, self).__delete__(repo.unfiltered())
99
100 class repofilecache(_basefilecache):
101 """filecache for files in .hg but outside of .hg/store"""
102 def __init__(self, *paths):
103 super(repofilecache, self).__init__(*paths)
104 for path in paths:
105 _cachedfiles.add((path, 'plain'))
106
107 def join(self, obj, fname):
108 return obj.vfs.join(fname)
109
110 class storecache(_basefilecache):
111 """filecache for files in the store"""
112 def __init__(self, *paths):
113 super(storecache, self).__init__(*paths)
114 for path in paths:
115 _cachedfiles.add((path, ''))
116
117 def join(self, obj, fname):
118 return obj.sjoin(fname)
119
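A sketch of how these descriptors are meant to be applied; the property
bodies and helper names here are illustrative, not taken from this file:

    class somerepo(object):
        @repofilecache('bookmarks')       # invalidated via .hg/bookmarks
        def _bookmarks(self):
            return computebookmarks(self)     # hypothetical helper

        @storecache('00changelog.i')      # invalidated via .hg/store/00changelog.i
        def changelog(self):
            return loadchangelog(self)        # hypothetical helper
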
120 def isfilecached(repo, name):
121 """check if a repo has already cached the "name" filecache-ed property
122
123 This returns a (cachedobj-or-None, iscached) tuple.
124 """
125 cacheentry = repo.unfiltered()._filecache.get(name, None)
126 if not cacheentry:
127 return None, False
128 return cacheentry.obj, True
129
130 class unfilteredpropertycache(util.propertycache):
131 """propertycache that applies to the unfiltered repo only"""
132
133 def __get__(self, repo, type=None):
134 unfi = repo.unfiltered()
135 if unfi is repo:
136 return super(unfilteredpropertycache, self).__get__(unfi)
137 return getattr(unfi, self.name)
138
139 class filteredpropertycache(util.propertycache):
140 """propertycache that must take filtering into account"""
141
142 def cachevalue(self, obj, value):
143 object.__setattr__(obj, self.name, value)
144
145
146 def hasunfilteredcache(repo, name):
147 """check if a repo has an unfilteredpropertycache value for <name>"""
148 return name in vars(repo.unfiltered())
149
150 def unfilteredmethod(orig):
151 """decorate a method that always needs to be run on the unfiltered version"""
152 def wrapper(repo, *args, **kwargs):
153 return orig(repo.unfiltered(), *args, **kwargs)
154 return wrapper
155
156 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
157 'unbundle'}
158 legacycaps = moderncaps.union({'changegroupsubset'})
159
160 @interfaceutil.implementer(repository.ipeercommandexecutor)
160 @interfaceutil.implementer(repository.ipeercommandexecutor)
161 class localcommandexecutor(object):
161 class localcommandexecutor(object):
162 def __init__(self, peer):
162 def __init__(self, peer):
163 self._peer = peer
163 self._peer = peer
164 self._sent = False
164 self._sent = False
165 self._closed = False
165 self._closed = False
166
166
167 def __enter__(self):
167 def __enter__(self):
168 return self
168 return self
169
169
170 def __exit__(self, exctype, excvalue, exctb):
170 def __exit__(self, exctype, excvalue, exctb):
171 self.close()
171 self.close()
172
172
173 def callcommand(self, command, args):
173 def callcommand(self, command, args):
174 if self._sent:
174 if self._sent:
175 raise error.ProgrammingError('callcommand() cannot be used after '
175 raise error.ProgrammingError('callcommand() cannot be used after '
176 'sendcommands()')
176 'sendcommands()')
177
177
178 if self._closed:
178 if self._closed:
179 raise error.ProgrammingError('callcommand() cannot be used after '
179 raise error.ProgrammingError('callcommand() cannot be used after '
180 'close()')
180 'close()')
181
181
182 # We don't need to support anything fancy. Just call the named
182 # We don't need to support anything fancy. Just call the named
183 # method on the peer and return a resolved future.
183 # method on the peer and return a resolved future.
184 fn = getattr(self._peer, pycompat.sysstr(command))
184 fn = getattr(self._peer, pycompat.sysstr(command))
185
185
186 f = pycompat.futures.Future()
186 f = pycompat.futures.Future()
187
187
188 try:
188 try:
189 result = fn(**pycompat.strkwargs(args))
189 result = fn(**pycompat.strkwargs(args))
190 except Exception:
190 except Exception:
191 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
191 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
192 else:
192 else:
193 f.set_result(result)
193 f.set_result(result)
194
194
195 return f
195 return f
196
196
197 def sendcommands(self):
197 def sendcommands(self):
198 self._sent = True
198 self._sent = True
199
199
200 def close(self):
200 def close(self):
201 self._closed = True
201 self._closed = True
202
202
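# Sketch of driving a command executor through the peer API (this mirrors
# how callers elsewhere in Mercurial use executors; illustrative only):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand('lookup', {'key': 'tip'})
#     # For the local executor, futures are already resolved by the time
#     # the context manager exits.
#     node = f.result()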
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Beginning of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Beginning of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire-level function happier. We need to build a proper object
            # from it in the local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Beginning of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Beginning of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

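# Sketch of how an extension might register a ``featuresetupfuncs`` entry
# (the extension module and requirement names are hypothetical):
#
#     # in exampleext.py
#     def featuresetup(ui, supported):
#         # Teach this Mercurial how to open repos with our requirement.
#         supported.add(b'exp-exampleext-req')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
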
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # The .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)

def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # the requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

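# The autoload above has the same effect as a user enabling the extension
# explicitly in a config file, e.g. for a repository with the 'lfs'
# requirement:
#
#     [extensions]
#     lfs =
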
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

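# For example, if a compression engine named 'zstd' is registered and
# advertises a revlog header, the derived requirement would be
# b'exp-compression-zstd' (engine name shown for illustration).
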
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    any requirement in that set is not recognized by currently loaded code.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

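# For instance, a repository created by a modern Mercurial typically carries
# the 'store', 'fncache', and 'dotencode' requirements, so it gets a
# ``fncachestore`` with dotencode enabled; a repo lacking 'store' (an
# ancient layout) falls back to ``basicstore``.
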
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

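# Sketch of an extension customizing file storage by wrapping one of the
# factory functions above (the extension and class names are hypothetical;
# note the check against ``extensionmodulenames`` recommended by the
# ``makelocalrepository()`` docstring):
#
#     from mercurial import extensions, localrepo
#
#     def wrapfilestorage(orig, requirements, features, **kwargs):
#         cls = orig(requirements=requirements, features=features, **kwargs)
#         if 'exampleext' not in kwargs['extensionmodulenames']:
#             return cls
#         return examplefilestorage  # a type implementing the same interface
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makefilestorage',
#                                 wrapfilestorage)
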
816 @interfaceutil.implementer(repository.ilocalrepositorymain)
816 @interfaceutil.implementer(repository.ilocalrepositorymain)
817 class localrepository(object):
817 class localrepository(object):
818 """Main class for representing local repositories.
818 """Main class for representing local repositories.
819
819
820 All local repositories are instances of this class.
820 All local repositories are instances of this class.
821
821
822 Constructed on its own, instances of this class are not usable as
822 Constructed on its own, instances of this class are not usable as
823 repository objects. To obtain a usable repository object, call
823 repository objects. To obtain a usable repository object, call
824 ``hg.repository()``, ``localrepo.instance()``, or
824 ``hg.repository()``, ``localrepo.instance()``, or
825 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
825 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
826 ``instance()`` adds support for creating new repositories.
826 ``instance()`` adds support for creating new repositories.
827 ``hg.repository()`` adds more extension integration, including calling
827 ``hg.repository()`` adds more extension integration, including calling
828 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
828 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
829 used.
829 used.
830 """
830 """
831
831
832 # obsolete experimental requirements:
832 # obsolete experimental requirements:
833 # - manifestv2: An experimental new manifest format that allowed
833 # - manifestv2: An experimental new manifest format that allowed
834 # for stem compression of long paths. Experiment ended up not
834 # for stem compression of long paths. Experiment ended up not
835 # being successful (repository sizes went up due to worse delta
835 # being successful (repository sizes went up due to worse delta
836 # chains), and the code was deleted in 4.6.
836 # chains), and the code was deleted in 4.6.
837 supportedformats = {
837 supportedformats = {
838 'revlogv1',
838 'revlogv1',
839 'generaldelta',
839 'generaldelta',
840 'treemanifest',
840 'treemanifest',
841 REVLOGV2_REQUIREMENT,
841 REVLOGV2_REQUIREMENT,
842 SPARSEREVLOG_REQUIREMENT,
842 SPARSEREVLOG_REQUIREMENT,
843 }
843 }
844 _basesupported = supportedformats | {
844 _basesupported = supportedformats | {
845 'store',
845 'store',
846 'fncache',
846 'fncache',
847 'shared',
847 'shared',
848 'relshared',
848 'relshared',
849 'dotencode',
849 'dotencode',
850 'exp-sparse',
850 'exp-sparse',
851 'internal-phase'
851 'internal-phase'
852 }
852 }
853
853
854 # list of prefix for file which can be written without 'wlock'
854 # list of prefix for file which can be written without 'wlock'
855 # Extensions should extend this list when needed
855 # Extensions should extend this list when needed
856 _wlockfreeprefix = {
856 _wlockfreeprefix = {
857 # We migh consider requiring 'wlock' for the next
857 # We migh consider requiring 'wlock' for the next
858 # two, but pretty much all the existing code assume
858 # two, but pretty much all the existing code assume
859 # wlock is not needed so we keep them excluded for
859 # wlock is not needed so we keep them excluded for
860 # now.
860 # now.
861 'hgrc',
861 'hgrc',
862 'requires',
862 'requires',
863 # XXX cache is a complicatged business someone
863 # XXX cache is a complicatged business someone
864 # should investigate this in depth at some point
864 # should investigate this in depth at some point
865 'cache/',
865 'cache/',
866 # XXX shouldn't be dirstate covered by the wlock?
866 # XXX shouldn't be dirstate covered by the wlock?
867 'dirstate',
867 'dirstate',
868 # XXX bisect was still a bit too messy at the time
868 # XXX bisect was still a bit too messy at the time
869 # this changeset was introduced. Someone should fix
869 # this changeset was introduced. Someone should fix
870 # the remainig bit and drop this line
870 # the remainig bit and drop this line
871 'bisect.state',
871 'bisect.state',
872 }
872 }
873
873
874 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
874 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
875 supportedrequirements, sharedpath, store, cachevfs,
875 supportedrequirements, sharedpath, store, cachevfs,
876 features, intents=None):
876 features, intents=None):
877 """Create a new local repository instance.
877 """Create a new local repository instance.
878
878
879 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
879 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
880 or ``localrepo.makelocalrepository()`` for obtaining a new repository
880 or ``localrepo.makelocalrepository()`` for obtaining a new repository
881 object.
881 object.
882
882
883 Arguments:
883 Arguments:
884
884
885 baseui
885 baseui
886 ``ui.ui`` instance that ``ui`` argument was based off of.
886 ``ui.ui`` instance that ``ui`` argument was based off of.
887
887
888 ui
888 ui
889 ``ui.ui`` instance for use by the repository.
889 ``ui.ui`` instance for use by the repository.
890
890
891 origroot
891 origroot
892 ``bytes`` path to working directory root of this repository.
892 ``bytes`` path to working directory root of this repository.
893
893
894 wdirvfs
894 wdirvfs
895 ``vfs.vfs`` rooted at the working directory.
895 ``vfs.vfs`` rooted at the working directory.
896
896
897 hgvfs
897 hgvfs
898 ``vfs.vfs`` rooted at .hg/
898 ``vfs.vfs`` rooted at .hg/
899
899
900 requirements
900 requirements
901 ``set`` of bytestrings representing repository opening requirements.
901 ``set`` of bytestrings representing repository opening requirements.
902
902
903 supportedrequirements
903 supportedrequirements
904 ``set`` of bytestrings representing repository requirements that we
904 ``set`` of bytestrings representing repository requirements that we
905 know how to open. May be a supetset of ``requirements``.
905 know how to open. May be a supetset of ``requirements``.
906
906
907 sharedpath
907 sharedpath
908 ``bytes`` Defining path to storage base directory. Points to a
908 ``bytes`` Defining path to storage base directory. Points to a
909 ``.hg/`` directory somewhere.
909 ``.hg/`` directory somewhere.
910
910
911 store
911 store
912 ``store.basicstore`` (or derived) instance providing access to
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry.
        self._filecache = {}

        # Holds sets of revisions to be filtered.
        # Should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

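    # A minimal sketch of how the wards above surface in practice: they only
    # fire when the relevant devel options are enabled, e.g. on any command
    # (assuming a repository in the current directory):
    #
    #   hg --config devel.check-locks=yes --config devel.all-warnings=yes pull
    #
    # Writes performed without the expected lock then emit develwarn notices.
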
    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

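    # A minimal usage sketch for the view methods above (assuming an existing
    # ``repo`` instance):
    #
    #   visible = repo.filtered('visible')  # hide filtered (e.g. hidden) revs
    #   unfi = repo.unfiltered()            # raw access to every revision
    #
    # Both return repository views sharing the same underlying storage.
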
    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match._root, match._cwd, match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(_("unknown revision '%s'") % changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

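    # A minimal lookup sketch for __getitem__ (assuming an existing ``repo``
    # instance):
    #
    #   wctx = repo[None]    # working directory context
    #   ctx = repo['tip']    # symbolic identifier
    #   ctx = repo[0]        # revision number
    #   ctxs = repo[0:5]     # list of contexts, skipping filtered revisions
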
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

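    # A minimal revset sketch (assuming an existing ``repo`` instance):
    #
    #   revs = repo.revs('ancestors(%d) and not public()', 42)
    #   for ctx in repo.set('heads(branch(%s))', 'default'):
    #       repo.ui.write('%s\n' % ctx.hex())
    #
    # The %d and %s placeholders are escaped by ``revsetlang.formatspec``, so
    # callers do not need to quote user-provided values themselves.
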
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

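    # A minimal hook-invocation sketch; 'myext-event' is a hypothetical hook
    # name (assuming an existing ``repo`` and a hex node string ``hexnode``):
    #
    #   repo.hook('myext-event', throw=False, node=hexnode)
    #
    # With throw=True a failing hook raises instead of merely reporting.
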
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

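    # A minimal tag/bookmark lookup sketch (assuming an existing ``repo``
    # instance and a binary ``node``):
    #
    #   allmap = repo.tags()             # {tag name: node}
    #   names = repo.nodetags(node)      # sorted tags pointing at ``node``
    #   marks = repo.nodebookmarks(node) # bookmarks pointing at ``node``
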
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

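    # A minimal symbol-resolution sketch (assuming an existing ``repo``):
    #
    #   node = repo.lookup('default')                         # symbol -> node
    #   tip = repo.branchtip('default', ignoremissing=True)   # None if absent
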
    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

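    # A minimal file-context sketch (assuming an existing ``repo`` with a
    # tracked file named 'README'):
    #
    #   fctx = repo.filectx('README', changeid='tip')
    #   data = fctx.data()   # file content at that revision
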
    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

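    # A minimal data-filter sketch; ``upper`` is a hypothetical filter name
    # (assuming an existing ``repo``). A registered filter can then be
    # referenced from [encode]/[decode] configuration patterns:
    #
    #   def upperfilter(s, params, **kwargs):
    #       return s.upper()
    #   repo.adddatafilter('upper', upperfilter)
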
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

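    # A minimal transaction sketch (assuming an existing ``repo``); the
    # returned transaction is a context manager and requires the store lock:
    #
    #   with repo.wlock(), repo.lock(), repo.transaction('example') as tr:
    #       ...  # store writes made here are rolled back on failure
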
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this needs to be invoked explicitly here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when hooks run. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
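# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical): the usual calling pattern around
# transaction(), mirroring what commit() does further down in this file --
# take wlock then lock, open the transaction, and guarantee tr.release() on
# every exit path so an aborted transaction is rolled back.
from mercurial import lock as lockmod

def with_transaction(repo):
    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()              # store lock, always after wlock
        tr = repo.transaction('example')
        # ... mutate the store here ...
        tr.close()                      # runs validator/finalize callbacks
    finally:
        if tr:
            tr.release()                # no-op if closed, abort otherwise
        lockmod.release(lock, wlock)
# ---------------------------------------------------------------------------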

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
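# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical): previewing a rollback before performing
# it. Per the code above, rollback() returns 0 on success and 1 when no undo
# information exists; the helper name is made up.
def preview_then_rollback(repo):
    if repo.rollback(dryrun=True) == 0:   # prints what would be undone
        return repo.rollback()
    return 1
# ---------------------------------------------------------------------------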

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
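# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical): the weakref pattern used above. The
# updater closure keeps only a weak reference, so a long-lived transaction
# callback does not keep the repository object alive.
import weakref

class _Repo(object):
    pass

_repo = _Repo()
_reporef = weakref.ref(_repo)

def _updater():
    repo = _reporef()       # None once the repo has been garbage collected
    if repo is not None:
        pass                # safe to use repo here

_updater()
del _repo                   # drop the last strong reference ...
assert _reporef() is None   # ... and the weakref goes dead (CPython)
# ---------------------------------------------------------------------------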

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

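# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical): warming every cache offline, similar in
# spirit to the debugupdatecaches command. Outside a transaction 'tr' is
# None, so the branch cache is refreshed, and full=True also rebuilds the
# rev-branch cache and the manifestfulltextcache entries handled above.
def warm_all_caches(repo):
    with repo.wlock(), repo.lock():   # hg lock objects are context managers
        repo.updatecaches(full=True)
# ---------------------------------------------------------------------------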
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

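# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical): scheduling work through _afterlock(),
# the same mechanism txnclosehook uses above to defer txnclose hooks until
# the outermost lock is released.
def notify_when_unlocked(repo):
    def callback():
        repo.ui.status('all repository locks released\n')
    repo._afterlock(callback)   # runs immediately if no lock is held
# ---------------------------------------------------------------------------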
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

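# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical): the lock ordering contract documented
# above -- always take wlock before lock, and release in reverse order.
from mercurial.lock import release

def locked_operation(repo):
    wlock = lock = None
    try:
        wlock = repo.wlock()   # working-copy lock first ...
        lock = repo.lock()     # ... then the store lock
        # ... modify the working copy and the store here ...
    finally:
        release(lock, wlock)   # reverse of the acquisition order
# ---------------------------------------------------------------------------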
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

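# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical): the reading side of the copy metadata
# built above. A rename recorded through meta['copy']/meta['copyrev'] is
# surfaced again by filectx.renamed(), the same API _filecommit consumes.
def show_rename(repo, rev, fname):
    fctx = repo[rev][fname]
    renamed = fctx.renamed()    # None, or (source path, source filenode)
    if renamed:
        repo.ui.write('%s was copied from %s\n' % (fname, renamed[0]))
# ---------------------------------------------------------------------------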
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

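# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical): driving commit() programmatically. Per
# the code above, commit() returns None when nothing would be committed
# (unless ui.allowemptycommit is set) and the new node otherwise.
def commit_all(repo, message):
    node = repo.commit(text=message, user='example <user@example.com>')
    if node is None:
        repo.ui.status('nothing changed\n')
    return node
# ---------------------------------------------------------------------------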
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entries so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                    if not files and md:
                        self.ui.debug('not reusing manifest (no file change in '
                                      'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

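# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical): commitctx() is also the entry point for
# in-memory commits. context.memctx builds a changectx-like object without
# touching the working directory; the path and contents here are made up.
from mercurial import context

def commit_in_memory(repo, parentnode, message):
    def filectxfn(repo, memctx, path):
        return context.memfilectx(repo, memctx, path, 'new contents\n')

    mctx = context.memctx(repo, (parentnode, None), message,
                          ['some/file.txt'], filectxfn,
                          user='example <user@example.com>')
    return repo.commitctx(mctx)
# ---------------------------------------------------------------------------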
2578 @unfilteredmethod
2578 @unfilteredmethod
2579 def destroying(self):
2579 def destroying(self):
2580 '''Inform the repository that nodes are about to be destroyed.
2580 '''Inform the repository that nodes are about to be destroyed.
2581 Intended for use by strip and rollback, so there's a common
2581 Intended for use by strip and rollback, so there's a common
2582 place for anything that has to be done before destroying history.
2582 place for anything that has to be done before destroying history.
2583
2583
2584 This is mostly useful for saving state that is in memory and waiting
2584 This is mostly useful for saving state that is in memory and waiting
2585 to be flushed when the current lock is released. Because a call to
2585 to be flushed when the current lock is released. Because a call to
2586 destroyed is imminent, the repo will be invalidated, causing those
2586 destroyed is imminent, the repo will be invalidated, causing those
2587 changes to stay in memory (waiting for the next unlock), or vanish
2587 changes to stay in memory (waiting for the next unlock), or vanish
2588 completely.
2588 completely.
2589 '''
2589 '''
2590 # When using the same lock to commit and strip, the phasecache is left
2590 # When using the same lock to commit and strip, the phasecache is left
2591 # dirty after committing. Then when we strip, the repo is invalidated,
2591 # dirty after committing. Then when we strip, the repo is invalidated,
2592 # causing those changes to disappear.
2592 # causing those changes to disappear.
2593 if '_phasecache' in vars(self):
2593 if '_phasecache' in vars(self):
2594 self._phasecache.write()
2594 self._phasecache.write()
2595
2595
2596 @unfilteredmethod
2596 @unfilteredmethod
2597 def destroyed(self):
2597 def destroyed(self):
2598 '''Inform the repository that nodes have been destroyed.
2598 '''Inform the repository that nodes have been destroyed.
2599 Intended for use by strip and rollback, so there's a common
2599 Intended for use by strip and rollback, so there's a common
2600 place for anything that has to be done after destroying history.
2600 place for anything that has to be done after destroying history.
2601 '''
2601 '''
2602 # When one tries to:
2602 # When one tries to:
2603 # 1) destroy nodes thus calling this method (e.g. strip)
2603 # 1) destroy nodes thus calling this method (e.g. strip)
2604 # 2) use phasecache somewhere (e.g. commit)
2604 # 2) use phasecache somewhere (e.g. commit)
2605 #
2605 #
2606 # then 2) will fail because the phasecache contains nodes that were
2606 # then 2) will fail because the phasecache contains nodes that were
2607 # removed. We can either remove phasecache from the filecache,
2607 # removed. We can either remove phasecache from the filecache,
2608 # causing it to reload next time it is accessed, or simply filter
2608 # causing it to reload next time it is accessed, or simply filter
2609 # the removed nodes now and write the updated cache.
2609 # the removed nodes now and write the updated cache.
2610 self._phasecache.filterunknown(self)
2610 self._phasecache.filterunknown(self)
2611 self._phasecache.write()
2611 self._phasecache.write()
2612
2612
2613 # refresh all repository caches
2613 # refresh all repository caches
2614 self.updatecaches()
2614 self.updatecaches()
2615
2615
2616 # Ensure the persistent tag cache is updated. Doing it now
2616 # Ensure the persistent tag cache is updated. Doing it now
2617 # means that the tag cache only has to worry about destroyed
2617 # means that the tag cache only has to worry about destroyed
2618 # heads immediately after a strip/rollback. That in turn
2618 # heads immediately after a strip/rollback. That in turn
2619 # guarantees that "cachetip == currenttip" (comparing both rev
2619 # guarantees that "cachetip == currenttip" (comparing both rev
2620 # and node) always means no nodes have been added or destroyed.
2620 # and node) always means no nodes have been added or destroyed.
2621
2621
2622 # XXX this is suboptimal when qrefresh'ing: we strip the current
2622 # XXX this is suboptimal when qrefresh'ing: we strip the current
2623 # head, refresh the tag cache, then immediately add a new head.
2623 # head, refresh the tag cache, then immediately add a new head.
2624 # But I think doing it this way is necessary for the "instant
2624 # But I think doing it this way is necessary for the "instant
2625 # tag cache retrieval" case to work.
2625 # tag cache retrieval" case to work.
2626 self.invalidate()
2626 self.invalidate()
2627
2627
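
The comments above describe a filter-then-write pattern: drop cache entries for nodes that no longer exist, then persist what remains. In miniature, with a hypothetical dict-based cache rather than the real phasecache API:

    def filterunknown(cache, knownnodes):
        # Drop entries whose nodes were destroyed; this mirrors
        # phasecache.filterunknown() in spirit only.
        for node in list(cache):
            if node not in knownnodes:
                del cache[node]

    cache = {b'kept': 1, b'stripped': 2}
    filterunknown(cache, knownnodes={b'kept'})
    assert cache == {b'kept': 1}   # survivors are then written back out
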
2628 def status(self, node1='.', node2=None, match=None,
2628 def status(self, node1='.', node2=None, match=None,
2629 ignored=False, clean=False, unknown=False,
2629 ignored=False, clean=False, unknown=False,
2630 listsubrepos=False):
2630 listsubrepos=False):
2631 '''a convenience method that calls node1.status(node2)'''
2631 '''a convenience method that calls node1.status(node2)'''
2632 return self[node1].status(node2, match, ignored, clean, unknown,
2632 return self[node1].status(node2, match, ignored, clean, unknown,
2633 listsubrepos)
2633 listsubrepos)
2634
2634
2635 def addpostdsstatus(self, ps):
2635 def addpostdsstatus(self, ps):
2636 """Add a callback to run within the wlock, at the point at which status
2636 """Add a callback to run within the wlock, at the point at which status
2637 fixups happen.
2637 fixups happen.
2638
2638
2639 On status completion, callback(wctx, status) will be called with the
2639 On status completion, callback(wctx, status) will be called with the
2640 wlock held, unless the dirstate has changed from underneath or the wlock
2640 wlock held, unless the dirstate has changed from underneath or the wlock
2641 couldn't be grabbed.
2641 couldn't be grabbed.
2642
2642
2643 Callbacks should not capture and use a cached copy of the dirstate --
2643 Callbacks should not capture and use a cached copy of the dirstate --
2644 it might change in the meanwhile. Instead, they should access the
2644 it might change in the meanwhile. Instead, they should access the
2645 dirstate via wctx.repo().dirstate.
2645 dirstate via wctx.repo().dirstate.
2646
2646
2647 This list is emptied out after each status run -- extensions should
2647 This list is emptied out after each status run -- extensions should
2648 make sure they add to this list each time dirstate.status is called.
2648 make sure they add to this list each time dirstate.status is called.
2649 Extensions should also make sure they don't call this for statuses
2649 Extensions should also make sure they don't call this for statuses
2650 that don't involve the dirstate.
2650 that don't involve the dirstate.
2651 """
2651 """
2652
2652
2653 # The list is located here for uniqueness reasons -- it is actually
2653 # The list is located here for uniqueness reasons -- it is actually
2654 # managed by the workingctx, but that isn't unique per-repo.
2654 # managed by the workingctx, but that isn't unique per-repo.
2655 self._postdsstatus.append(ps)
2655 self._postdsstatus.append(ps)
2656
2656
2657 def postdsstatus(self):
2657 def postdsstatus(self):
2658 """Used by workingctx to get the list of post-dirstate-status hooks."""
2658 """Used by workingctx to get the list of post-dirstate-status hooks."""
2659 return self._postdsstatus
2659 return self._postdsstatus
2660
2660
2661 def clearpostdsstatus(self):
2661 def clearpostdsstatus(self):
2662 """Used by workingctx to clear post-dirstate-status hooks."""
2662 """Used by workingctx to clear post-dirstate-status hooks."""
2663 del self._postdsstatus[:]
2663 del self._postdsstatus[:]
2664
2664
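
To make the registration contract concrete, here is a sketch of how an extension might hook this up. The wrapped method and the callback are hypothetical; per the docstring, the callback must be re-added on every dirstate status call and must reach the dirstate through wctx.repo():

    from mercurial import context, extensions

    def _fixup(wctx, status):
        # Runs with the wlock held once status completes.
        wctx.repo().ui.debug(b'post-status: %d modified\n'
                             % len(status.modified))

    def _dirstatestatus(orig, self, *args, **kwargs):
        self._repo.addpostdsstatus(_fixup)  # list is emptied after each run
        return orig(self, *args, **kwargs)

    def uisetup(ui):
        extensions.wrapfunction(context.workingctx, '_dirstatestatus',
                                _dirstatestatus)
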
2665 def heads(self, start=None):
2665 def heads(self, start=None):
2666 if start is None:
2666 if start is None:
2667 cl = self.changelog
2667 cl = self.changelog
2668 headrevs = reversed(cl.headrevs())
2668 headrevs = reversed(cl.headrevs())
2669 return [cl.node(rev) for rev in headrevs]
2669 return [cl.node(rev) for rev in headrevs]
2670
2670
2671 heads = self.changelog.heads(start)
2671 heads = self.changelog.heads(start)
2672 # sort the output in rev descending order
2672 # sort the output in rev descending order
2673 return sorted(heads, key=self.changelog.rev, reverse=True)
2673 return sorted(heads, key=self.changelog.rev, reverse=True)
2674
2674
2675 def branchheads(self, branch=None, start=None, closed=False):
2675 def branchheads(self, branch=None, start=None, closed=False):
2676 '''return a (possibly filtered) list of heads for the given branch
2676 '''return a (possibly filtered) list of heads for the given branch
2677
2677
2678 Heads are returned in topological order, from newest to oldest.
2678 Heads are returned in topological order, from newest to oldest.
2679 If branch is None, use the dirstate branch.
2679 If branch is None, use the dirstate branch.
2680 If start is not None, return only heads reachable from start.
2680 If start is not None, return only heads reachable from start.
2681 If closed is True, return heads that are marked as closed as well.
2681 If closed is True, return heads that are marked as closed as well.
2682 '''
2682 '''
2683 if branch is None:
2683 if branch is None:
2684 branch = self[None].branch()
2684 branch = self[None].branch()
2685 branches = self.branchmap()
2685 branches = self.branchmap()
2686 if branch not in branches:
2686 if branch not in branches:
2687 return []
2687 return []
2688 # the cache returns heads ordered lowest to highest
2688 # the cache returns heads ordered lowest to highest
2689 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2689 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2690 if start is not None:
2690 if start is not None:
2691 # filter out the heads that cannot be reached from startrev
2691 # filter out the heads that cannot be reached from startrev
2692 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2692 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2693 bheads = [h for h in bheads if h in fbheads]
2693 bheads = [h for h in bheads if h in fbheads]
2694 return bheads
2694 return bheads
2695
2695
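
A small usage sketch of the API documented above (the repository path and branch name are illustrative):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    for node in repo.branchheads(b'default', closed=True):
        # heads arrive newest-first, in topological order
        print(repo[node].rev())
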
2696 def branches(self, nodes):
2696 def branches(self, nodes):
2697 if not nodes:
2697 if not nodes:
2698 nodes = [self.changelog.tip()]
2698 nodes = [self.changelog.tip()]
2699 b = []
2699 b = []
2700 for n in nodes:
2700 for n in nodes:
2701 t = n
2701 t = n
2702 while True:
2702 while True:
2703 p = self.changelog.parents(n)
2703 p = self.changelog.parents(n)
2704 if p[1] != nullid or p[0] == nullid:
2704 if p[1] != nullid or p[0] == nullid:
2705 b.append((t, n, p[0], p[1]))
2705 b.append((t, n, p[0], p[1]))
2706 break
2706 break
2707 n = p[0]
2707 n = p[0]
2708 return b
2708 return b
2709
2709
2710 def between(self, pairs):
2710 def between(self, pairs):
2711 r = []
2711 r = []
2712
2712
2713 for top, bottom in pairs:
2713 for top, bottom in pairs:
2714 n, l, i = top, [], 0
2714 n, l, i = top, [], 0
2715 f = 1
2715 f = 1
2716
2716
2717 while n != bottom and n != nullid:
2717 while n != bottom and n != nullid:
2718 p = self.changelog.parents(n)[0]
2718 p = self.changelog.parents(n)[0]
2719 if i == f:
2719 if i == f:
2720 l.append(n)
2720 l.append(n)
2721 f = f * 2
2721 f = f * 2
2722 n = p
2722 n = p
2723 i += 1
2723 i += 1
2724
2724
2725 r.append(l)
2725 r.append(l)
2726
2726
2727 return r
2727 return r
2728
2728
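
between() samples ancestors at power-of-two distances below each top node, which keeps the reply size logarithmic in the chain length for the legacy wire protocol. A pure-Python model of the same walk over a linear chain, using toy integer nodes instead of hashes:

    def sample(top, bottom, parent):
        n, out, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:
                out.append(n)
                f *= 2
            n = parent[n]
            i += 1
        return out

    parent = {k: k - 1 for k in range(1, 11)}   # chain 10 -> 9 -> ... -> 0
    print(sample(10, 0, parent))                # [9, 8, 6, 2]: distances 1, 2, 4, 8
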
2729 def checkpush(self, pushop):
2729 def checkpush(self, pushop):
2730 """Extensions can override this function if additional checks have
2730 """Extensions can override this function if additional checks have
2731 to be performed before pushing, or call it if they override the push
2731 to be performed before pushing, or call it if they override the push
2732 command.
2732 command.
2733 """
2733 """
2734
2734
2735 @unfilteredpropertycache
2735 @unfilteredpropertycache
2736 def prepushoutgoinghooks(self):
2736 def prepushoutgoinghooks(self):
2737 """Return util.hooks consists of a pushop with repo, remote, outgoing
2737 """Return util.hooks consists of a pushop with repo, remote, outgoing
2738 methods, which are called before pushing changesets.
2738 methods, which are called before pushing changesets.
2739 """
2739 """
2740 return util.hooks()
2740 return util.hooks()
2741
2741
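
As a sketch of this extension hook point (the size check is hypothetical; largefiles and lfs register real hooks the same way):

    from mercurial import error

    def _checkoutgoing(pushop):
        # pushop carries .repo, .remote and .outgoing
        if len(pushop.outgoing.missing) > 100:
            raise error.Abort(b'refusing to push %d changesets'
                              % len(pushop.outgoing.missing))

    def reposetup(ui, repo):
        if repo.local():
            repo.prepushoutgoinghooks.add('sizecheck', _checkoutgoing)
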
2742 def pushkey(self, namespace, key, old, new):
2742 def pushkey(self, namespace, key, old, new):
2743 try:
2743 try:
2744 tr = self.currenttransaction()
2744 tr = self.currenttransaction()
2745 hookargs = {}
2745 hookargs = {}
2746 if tr is not None:
2746 if tr is not None:
2747 hookargs.update(tr.hookargs)
2747 hookargs.update(tr.hookargs)
2748 hookargs = pycompat.strkwargs(hookargs)
2748 hookargs = pycompat.strkwargs(hookargs)
2749 hookargs[r'namespace'] = namespace
2749 hookargs[r'namespace'] = namespace
2750 hookargs[r'key'] = key
2750 hookargs[r'key'] = key
2751 hookargs[r'old'] = old
2751 hookargs[r'old'] = old
2752 hookargs[r'new'] = new
2752 hookargs[r'new'] = new
2753 self.hook('prepushkey', throw=True, **hookargs)
2753 self.hook('prepushkey', throw=True, **hookargs)
2754 except error.HookAbort as exc:
2754 except error.HookAbort as exc:
2755 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2755 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2756 if exc.hint:
2756 if exc.hint:
2757 self.ui.write_err(_("(%s)\n") % exc.hint)
2757 self.ui.write_err(_("(%s)\n") % exc.hint)
2758 return False
2758 return False
2759 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2759 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2760 ret = pushkey.push(self, namespace, key, old, new)
2760 ret = pushkey.push(self, namespace, key, old, new)
2761 def runhook():
2761 def runhook():
2762 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2762 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2763 ret=ret)
2763 ret=ret)
2764 self._afterlock(runhook)
2764 self._afterlock(runhook)
2765 return ret
2765 return ret
2766
2766
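
For instance, a server could veto bookmark changes with a prepushkey hook; a hypothetical in-process hook (wired up through [hooks] in hgrc), whose failure surfaces as the pushkey-abort message printed above:

    # myhooks.py (hypothetical module; enable with
    # [hooks] prepushkey.nobm = python:myhooks.rejectbookmarks)
    def rejectbookmarks(ui, repo, namespace=None, **kwargs):
        if namespace == b'bookmarks':
            ui.warn(b'bookmark pushes are disabled here\n')
            return True     # a truthy return fails the hook
        return False
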
2767 def listkeys(self, namespace):
2767 def listkeys(self, namespace):
2768 self.hook('prelistkeys', throw=True, namespace=namespace)
2768 self.hook('prelistkeys', throw=True, namespace=namespace)
2769 self.ui.debug('listing keys for "%s"\n' % namespace)
2769 self.ui.debug('listing keys for "%s"\n' % namespace)
2770 values = pushkey.list(self, namespace)
2770 values = pushkey.list(self, namespace)
2771 self.hook('listkeys', namespace=namespace, values=values)
2771 self.hook('listkeys', namespace=namespace, values=values)
2772 return values
2772 return values
2773
2773
2774 def debugwireargs(self, one, two, three=None, four=None, five=None):
2774 def debugwireargs(self, one, two, three=None, four=None, five=None):
2775 '''used to test argument passing over the wire'''
2775 '''used to test argument passing over the wire'''
2776 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2776 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2777 pycompat.bytestr(four),
2777 pycompat.bytestr(four),
2778 pycompat.bytestr(five))
2778 pycompat.bytestr(five))
2779
2779
2780 def savecommitmessage(self, text):
2780 def savecommitmessage(self, text):
2781 fp = self.vfs('last-message.txt', 'wb')
2781 fp = self.vfs('last-message.txt', 'wb')
2782 try:
2782 try:
2783 fp.write(text)
2783 fp.write(text)
2784 finally:
2784 finally:
2785 fp.close()
2785 fp.close()
2786 return self.pathto(fp.name[len(self.root) + 1:])
2786 return self.pathto(fp.name[len(self.root) + 1:])
2787
2787
2788 # used to avoid circular references so destructors work
2788 # used to avoid circular references so destructors work
2789 def aftertrans(files):
2789 def aftertrans(files):
2790 renamefiles = [tuple(t) for t in files]
2790 renamefiles = [tuple(t) for t in files]
2791 def a():
2791 def a():
2792 for vfs, src, dest in renamefiles:
2792 for vfs, src, dest in renamefiles:
2793 # if src and dest refer to the same file, vfs.rename is a no-op,
2793 # if src and dest refer to the same file, vfs.rename is a no-op,
2794 # leaving both src and dest on disk. delete dest to make sure
2794 # leaving both src and dest on disk. delete dest to make sure
2795 # the rename cannot be such a no-op.
2795 # the rename cannot be such a no-op.
2796 vfs.tryunlink(dest)
2796 vfs.tryunlink(dest)
2797 try:
2797 try:
2798 vfs.rename(src, dest)
2798 vfs.rename(src, dest)
2799 except OSError: # journal file does not yet exist
2799 except OSError: # journal file does not yet exist
2800 pass
2800 pass
2801 return a
2801 return a
2802
2802
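
An os-level illustration of the unlink-before-rename pattern used above (plain os calls standing in for the vfs):

    import os

    def saferename(src, dest):
        try:
            os.unlink(dest)        # the rename below can no longer be a no-op
        except OSError:
            pass
        try:
            os.rename(src, dest)
        except OSError:            # src (the journal) may not exist yet
            pass
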
2803 def undoname(fn):
2803 def undoname(fn):
2804 base, name = os.path.split(fn)
2804 base, name = os.path.split(fn)
2805 assert name.startswith('journal')
2805 assert name.startswith('journal')
2806 return os.path.join(base, name.replace('journal', 'undo', 1))
2806 return os.path.join(base, name.replace('journal', 'undo', 1))
2807
2807
2808 def instance(ui, path, create, intents=None, createopts=None):
2808 def instance(ui, path, create, intents=None, createopts=None):
2809 localpath = util.urllocalpath(path)
2809 localpath = util.urllocalpath(path)
2810 if create:
2810 if create:
2811 createrepository(ui, localpath, createopts=createopts)
2811 createrepository(ui, localpath, createopts=createopts)
2812
2812
2813 return makelocalrepository(ui, localpath, intents=intents)
2813 return makelocalrepository(ui, localpath, intents=intents)
2814
2814
2815 def islocal(path):
2815 def islocal(path):
2816 return True
2816 return True
2817
2817
2818 def defaultcreateopts(ui, createopts=None):
2818 def defaultcreateopts(ui, createopts=None):
2819 """Populate the default creation options for a repository.
2819 """Populate the default creation options for a repository.
2820
2820
2821 A dictionary of explicitly requested creation options can be passed
2821 A dictionary of explicitly requested creation options can be passed
2822 in. Missing keys will be populated.
2822 in. Missing keys will be populated.
2823 """
2823 """
2824 createopts = dict(createopts or {})
2824 createopts = dict(createopts or {})
2825
2825
2826 if 'backend' not in createopts:
2826 if 'backend' not in createopts:
2827 # experimental config: storage.new-repo-backend
2827 # experimental config: storage.new-repo-backend
2828 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2828 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2829
2829
2830 return createopts
2830 return createopts
2831
2831
2832 def newreporequirements(ui, createopts):
2832 def newreporequirements(ui, createopts):
2833 """Determine the set of requirements for a new local repository.
2833 """Determine the set of requirements for a new local repository.
2834
2834
2835 Extensions can wrap this function to specify custom requirements for
2835 Extensions can wrap this function to specify custom requirements for
2836 new repositories.
2836 new repositories.
2837 """
2837 """
2838 # If the repo is being created from a shared repository, we copy
2838 # If the repo is being created from a shared repository, we copy
2839 # its requirements.
2839 # its requirements.
2840 if 'sharedrepo' in createopts:
2840 if 'sharedrepo' in createopts:
2841 requirements = set(createopts['sharedrepo'].requirements)
2841 requirements = set(createopts['sharedrepo'].requirements)
2842 if createopts.get('sharedrelative'):
2842 if createopts.get('sharedrelative'):
2843 requirements.add('relshared')
2843 requirements.add('relshared')
2844 else:
2844 else:
2845 requirements.add('shared')
2845 requirements.add('shared')
2846
2846
2847 return requirements
2847 return requirements
2848
2848
2849 if 'backend' not in createopts:
2849 if 'backend' not in createopts:
2850 raise error.ProgrammingError('backend key not present in createopts; '
2850 raise error.ProgrammingError('backend key not present in createopts; '
2851 'was defaultcreateopts() called?')
2851 'was defaultcreateopts() called?')
2852
2852
2853 if createopts['backend'] != 'revlogv1':
2853 if createopts['backend'] != 'revlogv1':
2854 raise error.Abort(_('unable to determine repository requirements for '
2854 raise error.Abort(_('unable to determine repository requirements for '
2855 'storage backend: %s') % createopts['backend'])
2855 'storage backend: %s') % createopts['backend'])
2856
2856
2857 requirements = {'revlogv1'}
2857 requirements = {'revlogv1'}
2858 if ui.configbool('format', 'usestore'):
2858 if ui.configbool('format', 'usestore'):
2859 requirements.add('store')
2859 requirements.add('store')
2860 if ui.configbool('format', 'usefncache'):
2860 if ui.configbool('format', 'usefncache'):
2861 requirements.add('fncache')
2861 requirements.add('fncache')
2862 if ui.configbool('format', 'dotencode'):
2862 if ui.configbool('format', 'dotencode'):
2863 requirements.add('dotencode')
2863 requirements.add('dotencode')
2864
2864
2865 compengine = ui.config('experimental', 'format.compression')
2865 compengine = ui.config('experimental', 'format.compression')
2866 if compengine not in util.compengines:
2866 if compengine not in util.compengines:
2867 raise error.Abort(_('compression engine %s defined by '
2867 raise error.Abort(_('compression engine %s defined by '
2868 'experimental.format.compression not available') %
2868 'experimental.format.compression not available') %
2869 compengine,
2869 compengine,
2870 hint=_('run "hg debuginstall" to list available '
2870 hint=_('run "hg debuginstall" to list available '
2871 'compression engines'))
2871 'compression engines'))
2872
2872
2873 # zlib is the historical default and doesn't need an explicit requirement.
2873 # zlib is the historical default and doesn't need an explicit requirement.
2874 if compengine != 'zlib':
2874 if compengine != 'zlib':
2875 requirements.add('exp-compression-%s' % compengine)
2875 requirements.add('exp-compression-%s' % compengine)
2876
2876
2877 if scmutil.gdinitconfig(ui):
2877 if scmutil.gdinitconfig(ui):
2878 requirements.add('generaldelta')
2878 requirements.add('generaldelta')
2879 if ui.configbool('experimental', 'treemanifest'):
2879 if ui.configbool('experimental', 'treemanifest'):
2880 requirements.add('treemanifest')
2880 requirements.add('treemanifest')
2881 # experimental config: format.sparse-revlog
2881 # experimental config: format.sparse-revlog
2882 if ui.configbool('format', 'sparse-revlog'):
2882 if ui.configbool('format', 'sparse-revlog'):
2883 requirements.add(SPARSEREVLOG_REQUIREMENT)
2883 requirements.add(SPARSEREVLOG_REQUIREMENT)
2884
2884
2885 revlogv2 = ui.config('experimental', 'revlogv2')
2885 revlogv2 = ui.config('experimental', 'revlogv2')
2886 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2886 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2887 requirements.remove('revlogv1')
2887 requirements.remove('revlogv1')
2888 # generaldelta is implied by revlogv2.
2888 # generaldelta is implied by revlogv2.
2889 requirements.discard('generaldelta')
2889 requirements.discard('generaldelta')
2890 requirements.add(REVLOGV2_REQUIREMENT)
2890 requirements.add(REVLOGV2_REQUIREMENT)
2891 # experimental config: format.internal-phase
2891 # experimental config: format.internal-phase
2892 if ui.configbool('format', 'internal-phase'):
2892 if ui.configbool('format', 'internal-phase'):
2893 requirements.add('internal-phase')
2893 requirements.add('internal-phase')
2894
2894
2895 if createopts.get('narrowfiles'):
2895 if createopts.get('narrowfiles'):
2896 requirements.add(repository.NARROW_REQUIREMENT)
2896 requirements.add(repository.NARROW_REQUIREMENT)
2897
2897
2898 if createopts.get('lfs'):
2899 requirements.add('lfs')
2900
2898 return requirements
2901 return requirements
2899
2902
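
The lfs change above follows the pattern the docstring advertises for extensions; a generic sketch of the same wrap, with a hypothetical creation option and requirement name:

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, ui, createopts):
        reqs = orig(ui, createopts)
        if createopts.get('myfeature'):       # hypothetical creation option
            reqs.add('exp-myfeature')         # hypothetical requirement
        return reqs

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)
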
2900 def filterknowncreateopts(ui, createopts):
2903 def filterknowncreateopts(ui, createopts):
2901 """Filters a dict of repo creation options against options that are known.
2904 """Filters a dict of repo creation options against options that are known.
2902
2905
2903 Receives a dict of repo creation options and returns a dict of those
2906 Receives a dict of repo creation options and returns a dict of those
2904 options that we don't know how to handle.
2907 options that we don't know how to handle.
2905
2908
2906 This function is called as part of repository creation. If the
2909 This function is called as part of repository creation. If the
2907 returned dict contains any items, repository creation will not
2910 returned dict contains any items, repository creation will not
2908 be allowed, as it means there was a request to create a repository
2911 be allowed, as it means there was a request to create a repository
2909 with options not recognized by loaded code.
2912 with options not recognized by loaded code.
2910
2913
2911 Extensions can wrap this function to filter out creation options
2914 Extensions can wrap this function to filter out creation options
2912 they know how to handle.
2915 they know how to handle.
2913 """
2916 """
2914 known = {
2917 known = {
2915 'backend',
2918 'backend',
2919 'lfs',
2916 'narrowfiles',
2920 'narrowfiles',
2917 'sharedrepo',
2921 'sharedrepo',
2918 'sharedrelative',
2922 'sharedrelative',
2919 'shareditems',
2923 'shareditems',
2920 }
2924 }
2921
2925
2922 return {k: v for k, v in createopts.items() if k not in known}
2926 return {k: v for k, v in createopts.items() if k not in known}
2923
2927
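
An extension introducing its own creation option would also claim it here, so that createrepository() does not reject it; continuing the hypothetical 'myfeature' sketch:

    from mercurial import extensions, localrepo

    def _filterknowncreateopts(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop('myfeature', None)    # we know how to handle this one
        return unknown

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'filterknowncreateopts',
                                _filterknowncreateopts)
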
2924 def createrepository(ui, path, createopts=None):
2928 def createrepository(ui, path, createopts=None):
2925 """Create a new repository in a vfs.
2929 """Create a new repository in a vfs.
2926
2930
2927 ``path`` path to the new repo's working directory.
2931 ``path`` path to the new repo's working directory.
2928 ``createopts`` options for the new repository.
2932 ``createopts`` options for the new repository.
2929
2933
2930 The following keys for ``createopts`` are recognized:
2934 The following keys for ``createopts`` are recognized:
2931
2935
2932 backend
2936 backend
2933 The storage backend to use.
2937 The storage backend to use.
2938 lfs
2939 Repository will be created with ``lfs`` requirement. The lfs extension
2940 will automatically be loaded when the repository is accessed.
2934 narrowfiles
2941 narrowfiles
2935 Set up repository to support narrow file storage.
2942 Set up repository to support narrow file storage.
2936 sharedrepo
2943 sharedrepo
2937 Repository object from which storage should be shared.
2944 Repository object from which storage should be shared.
2938 sharedrelative
2945 sharedrelative
2939 Boolean indicating if the path to the shared repo should be
2946 Boolean indicating if the path to the shared repo should be
2940 stored as relative. By default, the pointer to the "parent" repo
2947 stored as relative. By default, the pointer to the "parent" repo
2941 is stored as an absolute path.
2948 is stored as an absolute path.
2942 shareditems
2949 shareditems
2943 Set of items to share to the new repository (in addition to storage).
2950 Set of items to share to the new repository (in addition to storage).
2944 """
2951 """
2945 createopts = defaultcreateopts(ui, createopts=createopts)
2952 createopts = defaultcreateopts(ui, createopts=createopts)
2946
2953
2947 unknownopts = filterknowncreateopts(ui, createopts)
2954 unknownopts = filterknowncreateopts(ui, createopts)
2948
2955
2949 if not isinstance(unknownopts, dict):
2956 if not isinstance(unknownopts, dict):
2950 raise error.ProgrammingError('filterknowncreateopts() did not return '
2957 raise error.ProgrammingError('filterknowncreateopts() did not return '
2951 'a dict')
2958 'a dict')
2952
2959
2953 if unknownopts:
2960 if unknownopts:
2954 raise error.Abort(_('unable to create repository because of unknown '
2961 raise error.Abort(_('unable to create repository because of unknown '
2955 'creation option: %s') %
2962 'creation option: %s') %
2956 ', '.join(sorted(unknownopts)),
2963 ', '.join(sorted(unknownopts)),
2957 hint=_('is a required extension not loaded?'))
2964 hint=_('is a required extension not loaded?'))
2958
2965
2959 requirements = newreporequirements(ui, createopts=createopts)
2966 requirements = newreporequirements(ui, createopts=createopts)
2960
2967
2961 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2968 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2962
2969
2963 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2970 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2964 if hgvfs.exists():
2971 if hgvfs.exists():
2965 raise error.RepoError(_('repository %s already exists') % path)
2972 raise error.RepoError(_('repository %s already exists') % path)
2966
2973
2967 if 'sharedrepo' in createopts:
2974 if 'sharedrepo' in createopts:
2968 sharedpath = createopts['sharedrepo'].sharedpath
2975 sharedpath = createopts['sharedrepo'].sharedpath
2969
2976
2970 if createopts.get('sharedrelative'):
2977 if createopts.get('sharedrelative'):
2971 try:
2978 try:
2972 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
2979 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
2973 except (IOError, ValueError) as e:
2980 except (IOError, ValueError) as e:
2974 # ValueError is raised on Windows if the drive letters differ
2981 # ValueError is raised on Windows if the drive letters differ
2975 # on each path.
2982 # on each path.
2976 raise error.Abort(_('cannot calculate relative path'),
2983 raise error.Abort(_('cannot calculate relative path'),
2977 hint=stringutil.forcebytestr(e))
2984 hint=stringutil.forcebytestr(e))
2978
2985
2979 if not wdirvfs.exists():
2986 if not wdirvfs.exists():
2980 wdirvfs.makedirs()
2987 wdirvfs.makedirs()
2981
2988
2982 hgvfs.makedir(notindexed=True)
2989 hgvfs.makedir(notindexed=True)
2983
2990
2984 if b'store' in requirements and 'sharedrepo' not in createopts:
2991 if b'store' in requirements and 'sharedrepo' not in createopts:
2985 hgvfs.mkdir(b'store')
2992 hgvfs.mkdir(b'store')
2986
2993
2987 # We create an invalid changelog outside the store so very old
2994 # We create an invalid changelog outside the store so very old
2988 # Mercurial versions (which didn't know about the requirements
2995 # Mercurial versions (which didn't know about the requirements
2989 # file) encounter an error on reading the changelog. This
2996 # file) encounter an error on reading the changelog. This
2990 # effectively locks out old clients and prevents them from
2997 # effectively locks out old clients and prevents them from
2991 # mucking with a repo in an unknown format.
2998 # mucking with a repo in an unknown format.
2992 #
2999 #
2993 # The revlog header has version 2, which won't be recognized by
3000 # The revlog header has version 2, which won't be recognized by
2994 # such old clients.
3001 # such old clients.
2995 hgvfs.append(b'00changelog.i',
3002 hgvfs.append(b'00changelog.i',
2996 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3003 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2997 b'layout')
3004 b'layout')
2998
3005
2999 scmutil.writerequires(hgvfs, requirements)
3006 scmutil.writerequires(hgvfs, requirements)
3000
3007
3001 # Write out file telling readers where to find the shared store.
3008 # Write out file telling readers where to find the shared store.
3002 if 'sharedrepo' in createopts:
3009 if 'sharedrepo' in createopts:
3003 hgvfs.write(b'sharedpath', sharedpath)
3010 hgvfs.write(b'sharedpath', sharedpath)
3004
3011
3005 if createopts.get('shareditems'):
3012 if createopts.get('shareditems'):
3006 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3013 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3007 hgvfs.write(b'shared', shared)
3014 hgvfs.write(b'shared', shared)
3008
3015
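
Putting the new key to use: a minimal sketch of creating a repository that requires lfs. The path is illustrative, and it assumes the lfs extension is installed so the requirement can be honored when the repo is accessed:

    from mercurial import hg, localrepo, ui as uimod

    ui = uimod.ui.load()
    localrepo.createrepository(ui, b'/tmp/lfsrepo',
                               createopts={'lfs': True})
    repo = hg.repository(ui, b'/tmp/lfsrepo')
    assert 'lfs' in repo.requirements   # lfs loads automatically on access
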
3009 def poisonrepository(repo):
3016 def poisonrepository(repo):
3010 """Poison a repository instance so it can no longer be used."""
3017 """Poison a repository instance so it can no longer be used."""
3011 # Perform any cleanup on the instance.
3018 # Perform any cleanup on the instance.
3012 repo.close()
3019 repo.close()
3013
3020
3014 # Our strategy is to replace the type of the object with one that
3021 # Our strategy is to replace the type of the object with one that
3015 # has all attribute lookups result in error.
3022 # has all attribute lookups result in error.
3016 #
3023 #
3017 # But we have to allow the close() method because some constructors
3024 # But we have to allow the close() method because some constructors
3018 # of repos call close() on repo references.
3025 # of repos call close() on repo references.
3019 class poisonedrepository(object):
3026 class poisonedrepository(object):
3020 def __getattribute__(self, item):
3027 def __getattribute__(self, item):
3021 if item == r'close':
3028 if item == r'close':
3022 return object.__getattribute__(self, item)
3029 return object.__getattribute__(self, item)
3023
3030
3024 raise error.ProgrammingError('repo instances should not be used '
3031 raise error.ProgrammingError('repo instances should not be used '
3025 'after unshare')
3032 'after unshare')
3026
3033
3027 def close(self):
3034 def close(self):
3028 pass
3035 pass
3029
3036
3030 # We may have a repoview, which intercepts __setattr__. So be sure
3037 # We may have a repoview, which intercepts __setattr__. So be sure
3031 # we operate at the lowest level possible.
3038 # we operate at the lowest level possible.
3032 object.__setattr__(repo, r'__class__', poisonedrepository)
3039 object.__setattr__(repo, r'__class__', poisonedrepository)
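
Continuing the creation sketch above: after poisoning, only close() remains usable, and any other attribute access raises. A minimal illustration:

    from mercurial import error, localrepo

    localrepo.poisonrepository(repo)    # 'repo' from the sketch above
    repo.close()                        # still permitted
    try:
        repo.requirements               # any other access raises
    except error.ProgrammingError:
        pass
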
@@ -1,463 +1,465 b''
1 #require serve no-reposimplestore no-chg
1 #require serve no-reposimplestore no-chg
2
2
3 $ cat >> $HGRCPATH <<EOF
3 $ cat >> $HGRCPATH <<EOF
4 > [extensions]
4 > [extensions]
5 > lfs=
5 > lfs=
6 > [lfs]
6 > [lfs]
7 > track=all()
7 > track=all()
8 > [web]
8 > [web]
9 > push_ssl = False
9 > push_ssl = False
10 > allow-push = *
10 > allow-push = *
11 > EOF
11 > EOF
12
12
13 Serving LFS files can experimentally be turned off. The long-term solution is
13 Serving LFS files can experimentally be turned off. The long-term solution is
14 to support the 'verify' action in both client and server, so that the server can
14 to support the 'verify' action in both client and server, so that the server can
15 tell the client to store files elsewhere.
15 tell the client to store files elsewhere.
16
16
17 $ hg init server
17 $ hg init server
18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
19 > --config experimental.lfs.serve=False -R server serve -d \
19 > --config experimental.lfs.serve=False -R server serve -d \
20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
21 $ cat hg.pid >> $DAEMON_PIDS
21 $ cat hg.pid >> $DAEMON_PIDS
22
22
23 Uploads fail...
23 Uploads fail...
24
24
25 $ hg init client
25 $ hg init client
26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
27 $ hg -R client ci -Am 'initial commit'
27 $ hg -R client ci -Am 'initial commit'
28 adding lfs.bin
28 adding lfs.bin
29 $ hg -R client push http://localhost:$HGPORT
29 $ hg -R client push http://localhost:$HGPORT
30 pushing to http://localhost:$HGPORT/
30 pushing to http://localhost:$HGPORT/
31 searching for changes
31 searching for changes
32 abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=upload)!
32 abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=upload)!
33 [255]
33 [255]
34
34
35 ... so do a local push to make the data available. Remove the blob from the
35 ... so do a local push to make the data available. Remove the blob from the
36 default cache, so it attempts to download.
36 default cache, so it attempts to download.
37 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
37 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
38 > --config "lfs.url=null://" \
38 > --config "lfs.url=null://" \
39 > -R client push -q server
39 > -R client push -q server
40 $ mv `hg config lfs.usercache` $TESTTMP/servercache
40 $ mv `hg config lfs.usercache` $TESTTMP/servercache
41
41
42 Downloads fail...
42 Downloads fail...
43
43
44 $ hg clone http://localhost:$HGPORT httpclone
44 $ hg clone http://localhost:$HGPORT httpclone
45 (remote is using large file support (lfs); lfs will be enabled for this repository)
45 requesting all changes
46 requesting all changes
46 adding changesets
47 adding changesets
47 adding manifests
48 adding manifests
48 adding file changes
49 adding file changes
49 added 1 changesets with 1 changes to 1 files
50 added 1 changesets with 1 changes to 1 files
50 new changesets 525251863cad
51 new changesets 525251863cad
51 updating to branch default
52 updating to branch default
52 abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=download)!
53 abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=download)!
53 [255]
54 [255]
54
55
55 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
56 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
56
57
57 $ cat $TESTTMP/access.log $TESTTMP/errors.log
58 $ cat $TESTTMP/access.log $TESTTMP/errors.log
58 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
59 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
59 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
60 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
60 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
62 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
62 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
63 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
63 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
65 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
65 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
66 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
66 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
67 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
67
68
68 Blob URIs are correct when --prefix is used
69 Blob URIs are correct when --prefix is used
69
70
70 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
71 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
71 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
72 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
72 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
73 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
73 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
74 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
74 $ cat hg.pid >> $DAEMON_PIDS
75 $ cat hg.pid >> $DAEMON_PIDS
75
76
76 $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
77 $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
77 using http://localhost:$HGPORT/subdir/mount/point
78 using http://localhost:$HGPORT/subdir/mount/point
78 sending capabilities command
79 sending capabilities command
80 (remote is using large file support (lfs); lfs will be enabled for this repository)
79 query 1; heads
81 query 1; heads
80 sending batch command
82 sending batch command
81 requesting all changes
83 requesting all changes
82 sending getbundle command
84 sending getbundle command
83 bundle2-input-bundle: with-transaction
85 bundle2-input-bundle: with-transaction
84 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
86 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
85 adding changesets
87 adding changesets
86 add changeset 525251863cad
88 add changeset 525251863cad
87 adding manifests
89 adding manifests
88 adding file changes
90 adding file changes
89 adding lfs.bin revisions
91 adding lfs.bin revisions
90 added 1 changesets with 1 changes to 1 files
92 added 1 changesets with 1 changes to 1 files
91 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
92 bundle2-input-part: total payload size 648
93 bundle2-input-part: total payload size 648
93 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
94 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
94 bundle2-input-part: "phase-heads" supported
95 bundle2-input-part: "phase-heads" supported
95 bundle2-input-part: total payload size 24
96 bundle2-input-part: total payload size 24
96 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
97 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
97 bundle2-input-part: total payload size 39
98 bundle2-input-part: total payload size 39
98 bundle2-input-bundle: 3 parts total
99 bundle2-input-bundle: 3 parts total
99 checking for updated bookmarks
100 checking for updated bookmarks
100 updating the branch cache
101 updating the branch cache
101 new changesets 525251863cad
102 new changesets 525251863cad
102 updating to branch default
103 updating to branch default
103 resolving manifests
104 resolving manifests
104 branchmerge: False, force: False, partial: False
105 branchmerge: False, force: False, partial: False
105 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
106 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
106 lfs: assuming remote store: http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs
107 lfs: assuming remote store: http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs
107 Status: 200
108 Status: 200
108 Content-Length: 371
109 Content-Length: 371
109 Content-Type: application/vnd.git-lfs+json
110 Content-Type: application/vnd.git-lfs+json
110 Date: $HTTP_DATE$
111 Date: $HTTP_DATE$
111 Server: testing stub value
112 Server: testing stub value
112 {
113 {
113 "objects": [
114 "objects": [
114 {
115 {
115 "actions": {
116 "actions": {
116 "download": {
117 "download": {
117 "expires_at": "$ISO_8601_DATE_TIME$"
118 "expires_at": "$ISO_8601_DATE_TIME$"
118 "header": {
119 "header": {
119 "Accept": "application/vnd.git-lfs"
120 "Accept": "application/vnd.git-lfs"
120 }
121 }
121 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
122 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
122 }
123 }
123 }
124 }
124 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
125 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
125 "size": 20
126 "size": 20
126 }
127 }
127 ]
128 ]
128 "transfer": "basic"
129 "transfer": "basic"
129 }
130 }
130 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
131 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
131 Status: 200
132 Status: 200
132 Content-Length: 20
133 Content-Length: 20
133 Content-Type: application/octet-stream
134 Content-Type: application/octet-stream
134 Date: $HTTP_DATE$
135 Date: $HTTP_DATE$
135 Server: testing stub value
136 Server: testing stub value
136 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
137 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
137 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
138 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
138 lfs: downloaded 1 files (20 bytes)
139 lfs: downloaded 1 files (20 bytes)
139 lfs.bin: remote created -> g
140 lfs.bin: remote created -> g
140 getting lfs.bin
141 getting lfs.bin
141 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
142 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
142 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
143 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
143 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
144 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
144
145
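
The "(remote is using large file support (lfs); ...)" line in the clone output above is the client reacting to the server's capability advertisement; schematically, a sketch of the idea rather than the exact hgext.lfs code:

    # Sketch: decide on autoload from the peer's capability list.
    def wantslfs(remote):
        # 'lfs-serve' means the server *requires* lfs, so the clone
        # should enable the extension and record the requirement.
        return remote.capable('lfs-serve')
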
145 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
146 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
146
147
147 $ cat $TESTTMP/access.log $TESTTMP/errors.log
148 $ cat $TESTTMP/access.log $TESTTMP/errors.log
148 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
149 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
149 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
150 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
150 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
151 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
151 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
152 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
152 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
153 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
153
154
154 Blobs that already exist in the usercache are linked into the repo store, even
155 Blobs that already exist in the usercache are linked into the repo store, even
155 though the client doesn't send the blob.
156 though the client doesn't send the blob.
156
157
157 $ hg init server2
158 $ hg init server2
158 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server2 serve -d \
159 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server2 serve -d \
159 > -p $HGPORT --pid-file=hg.pid \
160 > -p $HGPORT --pid-file=hg.pid \
160 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
161 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
161 $ cat hg.pid >> $DAEMON_PIDS
162 $ cat hg.pid >> $DAEMON_PIDS
162
163
163 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R cloned2 --debug \
164 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R cloned2 --debug \
164 > push http://localhost:$HGPORT | grep '^[{} ]'
165 > push http://localhost:$HGPORT | grep '^[{} ]'
165 {
166 {
166 "objects": [
167 "objects": [
167 {
168 {
168 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
169 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
169 "size": 20
170 "size": 20
170 }
171 }
171 ]
172 ]
172 "transfer": "basic"
173 "transfer": "basic"
173 }
174 }
174 $ find server2/.hg/store/lfs/objects | sort
175 $ find server2/.hg/store/lfs/objects | sort
175 server2/.hg/store/lfs/objects
176 server2/.hg/store/lfs/objects
176 server2/.hg/store/lfs/objects/f0
177 server2/.hg/store/lfs/objects/f0
177 server2/.hg/store/lfs/objects/f0/3217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
178 server2/.hg/store/lfs/objects/f0/3217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
178 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
179 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
179 $ cat $TESTTMP/errors.log
180 $ cat $TESTTMP/errors.log
180
181
181 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
182 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
182 > import errno
183 > import errno
183 > from hgext.lfs import blobstore
184 > from hgext.lfs import blobstore
184 >
185 >
185 > _numverifies = 0
186 > _numverifies = 0
186 > _readerr = True
187 > _readerr = True
187 >
188 >
188 > def reposetup(ui, repo):
189 > def reposetup(ui, repo):
189 > # Nothing to do with a remote repo
190 > # Nothing to do with a remote repo
190 > if not repo.local():
191 > if not repo.local():
191 > return
192 > return
192 >
193 >
193 > store = repo.svfs.lfslocalblobstore
194 > store = repo.svfs.lfslocalblobstore
194 > class badstore(store.__class__):
195 > class badstore(store.__class__):
195 > def download(self, oid, src):
196 > def download(self, oid, src):
196 > '''Called in the server to handle reading from the client in a
197 > '''Called in the server to handle reading from the client in a
197 > PUT request.'''
198 > PUT request.'''
198 > origread = src.read
199 > origread = src.read
199 > def _badread(nbytes):
200 > def _badread(nbytes):
200 > # Simulate bad data/checksum failure from the client
201 > # Simulate bad data/checksum failure from the client
201 > return b'0' * len(origread(nbytes))
202 > return b'0' * len(origread(nbytes))
202 > src.read = _badread
203 > src.read = _badread
203 > super(badstore, self).download(oid, src)
204 > super(badstore, self).download(oid, src)
204 >
205 >
205 > def _read(self, vfs, oid, verify):
206 > def _read(self, vfs, oid, verify):
206 > '''Called in the server to read data for a GET request, and then
207 > '''Called in the server to read data for a GET request, and then
207 > calls self._verify() on it before returning.'''
208 > calls self._verify() on it before returning.'''
208 > global _readerr
209 > global _readerr
209 > # One time simulation of a read error
210 > # One time simulation of a read error
210 > if _readerr:
211 > if _readerr:
211 > _readerr = False
212 > _readerr = False
212 > raise IOError(errno.EIO, '%s: I/O error' % oid)
213 > raise IOError(errno.EIO, '%s: I/O error' % oid)
213 > # Simulate corrupt content on client download
214 > # Simulate corrupt content on client download
214 > blobstore._verify(oid, 'dummy content')
215 > blobstore._verify(oid, 'dummy content')
215 >
216 >
216 > def verify(self, oid):
217 > def verify(self, oid):
217 > '''Called in the server to populate the Batch API response,
218 > '''Called in the server to populate the Batch API response,
218 > letting the client re-upload if the file is corrupt.'''
219 > letting the client re-upload if the file is corrupt.'''
219 > # Fail verify in Batch API for one clone command and one push
220 > # Fail verify in Batch API for one clone command and one push
220 > # command with an IOError. Then let it through to access other
221 > # command with an IOError. Then let it through to access other
221 > # functions. Checksum failure is tested elsewhere.
222 > # functions. Checksum failure is tested elsewhere.
222 > global _numverifies
223 > global _numverifies
223 > _numverifies += 1
224 > _numverifies += 1
224 > if _numverifies <= 2:
225 > if _numverifies <= 2:
225 > raise IOError(errno.EIO, '%s: I/O error' % oid)
226 > raise IOError(errno.EIO, '%s: I/O error' % oid)
226 > return super(badstore, self).verify(oid)
227 > return super(badstore, self).verify(oid)
227 >
228 >
228 > store.__class__ = badstore
229 > store.__class__ = badstore
229 > EOF
230 > EOF
230
231
231 $ rm -rf `hg config lfs.usercache`
232 $ rm -rf `hg config lfs.usercache`
232 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
233 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
233 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
234 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
234 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
235 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
235 > -R server serve -d \
236 > -R server serve -d \
236 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
237 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
237 $ cat hg.pid >> $DAEMON_PIDS
238 $ cat hg.pid >> $DAEMON_PIDS
238
239
239 Test an I/O error in localstore.verify() (Batch API) with GET
240 Test an I/O error in localstore.verify() (Batch API) with GET
240
241
241 $ hg clone http://localhost:$HGPORT1 httpclone2
242 $ hg clone http://localhost:$HGPORT1 httpclone2
243 (remote is using large file support (lfs); lfs will be enabled for this repository)
242 requesting all changes
244 requesting all changes
243 adding changesets
245 adding changesets
244 adding manifests
246 adding manifests
245 adding file changes
247 adding file changes
246 added 1 changesets with 1 changes to 1 files
248 added 1 changesets with 1 changes to 1 files
247 new changesets 525251863cad
249 new changesets 525251863cad
248 updating to branch default
250 updating to branch default
249 abort: LFS server error for "lfs.bin": Internal server error!
251 abort: LFS server error for "lfs.bin": Internal server error!
250 [255]
252 [255]
251
253
252 Test an I/O error in localstore.verify() (Batch API) with PUT
254 Test an I/O error in localstore.verify() (Batch API) with PUT
253
255
254 $ echo foo > client/lfs.bin
256 $ echo foo > client/lfs.bin
255 $ hg -R client ci -m 'mod lfs'
257 $ hg -R client ci -m 'mod lfs'
256 $ hg -R client push http://localhost:$HGPORT1
258 $ hg -R client push http://localhost:$HGPORT1
257 pushing to http://localhost:$HGPORT1/
259 pushing to http://localhost:$HGPORT1/
258 searching for changes
260 searching for changes
259 abort: LFS server error for "unknown": Internal server error!
261 abort: LFS server error for "unknown": Internal server error!
260 [255]
262 [255]
261 TODO: figure out how to associate the file name in the error above
263 TODO: figure out how to associate the file name in the error above
262
264
263 Test a bad checksum sent by the client in the transfer API
265 Test a bad checksum sent by the client in the transfer API
264
266
265 $ hg -R client push http://localhost:$HGPORT1
267 $ hg -R client push http://localhost:$HGPORT1
266 pushing to http://localhost:$HGPORT1/
268 pushing to http://localhost:$HGPORT1/
267 searching for changes
269 searching for changes
268 abort: HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
270 abort: HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
269 [255]
271 [255]
270
272
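The oid in the 422 error above is simply the SHA-256 of the blob contents: the
client rewrote lfs.bin with `echo foo`, so the blob is "foo" plus a trailing
newline. A quick sanity check:

  >>> import hashlib
  >>> hashlib.sha256(b'foo\n').hexdigest()
  'b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c'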
271 $ echo 'test lfs file' > server/lfs3.bin
273 $ echo 'test lfs file' > server/lfs3.bin
272 $ hg --config experimental.lfs.disableusercache=True \
274 $ hg --config experimental.lfs.disableusercache=True \
273 > -R server ci -Aqm 'another lfs file'
275 > -R server ci -Aqm 'another lfs file'
274 $ hg -R client pull -q http://localhost:$HGPORT1
276 $ hg -R client pull -q http://localhost:$HGPORT1
275
277
276 Test an I/O error during the processing of the GET request
278 Test an I/O error during the processing of the GET request
277
279
278 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
280 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
279 > -R client update -r tip
281 > -R client update -r tip
280 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
282 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
281 [255]
283 [255]
282
284
283 Test a checksum failure during the processing of the GET request
285 Test a checksum failure during the processing of the GET request
284
286
285 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
287 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
286 > -R client update -r tip
288 > -R client update -r tip
287 abort: HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
289 abort: HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
288 [255]
290 [255]
289
291
290 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
292 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
291
293
292 $ cat $TESTTMP/access.log
294 $ cat $TESTTMP/access.log
293 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
295 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
294 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
296 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
295 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
297 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
296 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
298 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
297 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
299 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
298 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
300 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
299 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
301 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
300 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
302 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
301 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
303 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
302 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
304 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
303 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
305 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
304 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
306 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
305 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
307 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
306 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
308 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
307 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
309 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
308 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
310 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
309 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
311 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
310 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
312 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
311 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
313 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
312 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
314 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
313 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
315 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
314 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
316 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
315 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
317 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
316 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
318 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
317 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
319 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
318 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 422 - (glob)
320 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 422 - (glob)
319
321
320 $ grep -v ' File "' $TESTTMP/errors.log
322 $ grep -v ' File "' $TESTTMP/errors.log
321 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
323 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
322 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
324 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
323 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
325 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
324 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
326 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
325 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
327 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
326 $LOCALIP - - [$ERRDATE$] HG error: (glob)
328 $LOCALIP - - [$ERRDATE$] HG error: (glob)
327 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
329 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
328 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
330 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
329 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
331 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
330 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
332 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
331 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
333 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
332 $LOCALIP - - [$ERRDATE$] HG error: (glob)
334 $LOCALIP - - [$ERRDATE$] HG error: (glob)
333 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
335 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
334 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
336 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
335 $LOCALIP - - [$ERRDATE$] HG error: localstore.download(oid, req.bodyfh) (glob)
337 $LOCALIP - - [$ERRDATE$] HG error: localstore.download(oid, req.bodyfh) (glob)
336 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src) (glob)
338 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src) (glob)
337 $LOCALIP - - [$ERRDATE$] HG error: % oid) (glob)
339 $LOCALIP - - [$ERRDATE$] HG error: % oid) (glob)
338 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (glob)
340 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (glob)
339 $LOCALIP - - [$ERRDATE$] HG error: (glob)
341 $LOCALIP - - [$ERRDATE$] HG error: (glob)
340 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
342 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
341 Traceback (most recent call last):
343 Traceback (most recent call last):
342 self.do_write()
344 self.do_write()
343 self.do_hgweb()
345 self.do_hgweb()
344 for chunk in self.server.application(env, self._start_response):
346 for chunk in self.server.application(env, self._start_response):
345 for r in self._runwsgi(req, res, repo):
347 for r in self._runwsgi(req, res, repo):
346 rctx, req, res, self.check_perm)
348 rctx, req, res, self.check_perm)
347 return func(*(args + a), **kw)
349 return func(*(args + a), **kw)
348 lambda perm:
350 lambda perm:
349 res.setbodybytes(localstore.read(oid))
351 res.setbodybytes(localstore.read(oid))
350 blob = self._read(self.vfs, oid, verify)
352 blob = self._read(self.vfs, oid, verify)
351 raise IOError(errno.EIO, '%s: I/O error' % oid)
353 raise IOError(errno.EIO, '%s: I/O error' % oid)
352 IOError: [Errno 5] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error
354 IOError: [Errno 5] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error
353
355
354 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
356 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
355 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
357 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
356 $LOCALIP - - [$ERRDATE$] HG error: res.setbodybytes(localstore.read(oid)) (glob)
358 $LOCALIP - - [$ERRDATE$] HG error: res.setbodybytes(localstore.read(oid)) (glob)
357 $LOCALIP - - [$ERRDATE$] HG error: blob = self._read(self.vfs, oid, verify) (glob)
359 $LOCALIP - - [$ERRDATE$] HG error: blob = self._read(self.vfs, oid, verify) (glob)
358 $LOCALIP - - [$ERRDATE$] HG error: blobstore._verify(oid, 'dummy content') (glob)
360 $LOCALIP - - [$ERRDATE$] HG error: blobstore._verify(oid, 'dummy content') (glob)
359 $LOCALIP - - [$ERRDATE$] HG error: hint=_('run hg verify')) (glob)
361 $LOCALIP - - [$ERRDATE$] HG error: hint=_('run hg verify')) (glob)
360 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (glob)
362 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (glob)
361 $LOCALIP - - [$ERRDATE$] HG error: (glob)
363 $LOCALIP - - [$ERRDATE$] HG error: (glob)
362
364
363 Basic Authorization headers are returned by the Batch API, and sent back with
365 Basic Authorization headers are returned by the Batch API, and sent back with
364 the subsequent GET/PUT requests.
366 the subsequent GET/PUT requests.
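The "Basic dXNlcjpwYXNz" value appearing in the Batch API responses below is
just the base64 encoding of the user:pass credentials configured in the [auth]
section that follows:

  >>> import base64
  >>> base64.b64encode(b'user:pass').decode()
  'dXNlcjpwYXNz'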
365
367
366 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
368 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
367
369
368 $ cat >> $HGRCPATH << EOF
370 $ cat >> $HGRCPATH << EOF
369 > [experimental]
371 > [experimental]
370 > lfs.disableusercache = True
372 > lfs.disableusercache = True
371 > [auth]
373 > [auth]
372 > l.schemes=http
374 > l.schemes=http
373 > l.prefix=lo
375 > l.prefix=lo
374 > l.username=user
376 > l.username=user
375 > l.password=pass
377 > l.password=pass
376 > EOF
378 > EOF
377
379
378 $ cat << EOF > userpass.py
380 $ cat << EOF > userpass.py
379 > import base64
381 > import base64
380 > from mercurial.hgweb import common
382 > from mercurial.hgweb import common
381 > def perform_authentication(hgweb, req, op):
383 > def perform_authentication(hgweb, req, op):
382 > auth = req.headers.get(b'Authorization')
384 > auth = req.headers.get(b'Authorization')
383 > if not auth:
385 > if not auth:
384 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, b'who',
386 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, b'who',
385 > [(b'WWW-Authenticate', b'Basic Realm="mercurial"')])
387 > [(b'WWW-Authenticate', b'Basic Realm="mercurial"')])
386 > if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
388 > if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
387 > b'pass']:
389 > b'pass']:
388 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
390 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
389 > def extsetup():
391 > def extsetup():
390 > common.permhooks.insert(0, perform_authentication)
392 > common.permhooks.insert(0, perform_authentication)
391 > EOF
393 > EOF
392
394
393 $ hg --config extensions.x=$TESTTMP/userpass.py \
395 $ hg --config extensions.x=$TESTTMP/userpass.py \
394 > -R server serve -d -p $HGPORT1 --pid-file=hg.pid \
396 > -R server serve -d -p $HGPORT1 --pid-file=hg.pid \
395 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
397 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
396 $ mv hg.pid $DAEMON_PIDS
398 $ mv hg.pid $DAEMON_PIDS
397
399
398 $ hg clone --debug http://localhost:$HGPORT1 auth_clone | egrep '^[{}]| '
400 $ hg clone --debug http://localhost:$HGPORT1 auth_clone | egrep '^[{}]| '
399 {
401 {
400 "objects": [
402 "objects": [
401 {
403 {
402 "actions": {
404 "actions": {
403 "download": {
405 "download": {
404 "expires_at": "$ISO_8601_DATE_TIME$"
406 "expires_at": "$ISO_8601_DATE_TIME$"
405 "header": {
407 "header": {
406 "Accept": "application/vnd.git-lfs"
408 "Accept": "application/vnd.git-lfs"
407 "Authorization": "Basic dXNlcjpwYXNz"
409 "Authorization": "Basic dXNlcjpwYXNz"
408 }
410 }
409 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
411 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
410 }
412 }
411 }
413 }
412 "oid": "276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
414 "oid": "276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
413 "size": 14
415 "size": 14
414 }
416 }
415 ]
417 ]
416 "transfer": "basic"
418 "transfer": "basic"
417 }
419 }
418
420
419 $ echo 'another blob' > auth_clone/lfs.blob
421 $ echo 'another blob' > auth_clone/lfs.blob
420 $ hg -R auth_clone ci -Aqm 'add blob'
422 $ hg -R auth_clone ci -Aqm 'add blob'
421 $ hg -R auth_clone --debug push | egrep '^[{}]| '
423 $ hg -R auth_clone --debug push | egrep '^[{}]| '
422 {
424 {
423 "objects": [
425 "objects": [
424 {
426 {
425 "actions": {
427 "actions": {
426 "upload": {
428 "upload": {
427 "expires_at": "$ISO_8601_DATE_TIME$"
429 "expires_at": "$ISO_8601_DATE_TIME$"
428 "header": {
430 "header": {
429 "Accept": "application/vnd.git-lfs"
431 "Accept": "application/vnd.git-lfs"
430 "Authorization": "Basic dXNlcjpwYXNz"
432 "Authorization": "Basic dXNlcjpwYXNz"
431 }
433 }
432 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
434 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
433 }
435 }
434 }
436 }
435 "oid": "df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
437 "oid": "df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
436 "size": 13
438 "size": 13
437 }
439 }
438 ]
440 ]
439 "transfer": "basic"
441 "transfer": "basic"
440 }
442 }
441
443
442 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
444 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
443
445
444 $ cat $TESTTMP/access.log $TESTTMP/errors.log
446 $ cat $TESTTMP/access.log $TESTTMP/errors.log
445 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
447 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
446 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
448 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
447 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
449 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
448 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
450 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
449 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
451 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
450 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
452 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
451 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
453 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
452 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
454 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
453 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
455 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
454 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
456 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
455 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
457 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
456 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
458 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
457 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
459 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
458 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
460 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
459 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
461 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
460 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
462 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
461 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3 HTTP/1.1" 201 - (glob)
463 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3 HTTP/1.1" 201 - (glob)
462 $LOCALIP - - [$LOGDATE$] "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
464 $LOCALIP - - [$LOGDATE$] "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
463 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
465 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
@@ -1,668 +1,666 b''
1 #testcases lfsremote-on lfsremote-off
1 #testcases lfsremote-on lfsremote-off
2 #require serve no-reposimplestore no-chg
2 #require serve no-reposimplestore no-chg
3
3
4 This test splits `hg serve` with and without using the extension into separate
4 This test splits `hg serve` with and without using the extension into separate
5 test cases. The tests are broken down as follows, where "LFS"/"No-LFS"
5 test cases. The tests are broken down as follows, where "LFS"/"No-LFS"
6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
7 indicates whether or not the extension is loaded. The "X" cases are not tested
7 indicates whether or not the extension is loaded. The "X" cases are not tested
8 individually, because the lfs requirement causes the process to bail early if
8 individually, because the lfs requirement causes the process to bail early if
9 the extension is disabled.
9 the extension is disabled.
10
10
11 . Server
11 . Server
12 .
12 .
13 . No-LFS LFS
13 . No-LFS LFS
14 . +----------------------------+
14 . +----------------------------+
15 . | || D | E | D | E |
15 . | || D | E | D | E |
16 . |---++=======================|
16 . |---++=======================|
17 . C | D || N/A | #1 | X | #4 |
17 . C | D || N/A | #1 | X | #4 |
18 . l No +---++-----------------------|
18 . l No +---++-----------------------|
19 . i LFS | E || #2 | #2 | X | #5 |
19 . i LFS | E || #2 | #2 | X | #5 |
20 . e +---++-----------------------|
20 . e +---++-----------------------|
21 . n | D || X | X | X | X |
21 . n | D || X | X | X | X |
22 . t LFS |---++-----------------------|
22 . t LFS |---++-----------------------|
23 . | E || #3 | #3 | X | #6 |
23 . | E || #3 | #3 | X | #6 |
24 . |---++-----------------------+
24 . |---++-----------------------+
25
25
26 make command server magic visible
26 make command server magic visible
27
27
28 #if windows
28 #if windows
29 $ PYTHONPATH="$TESTDIR/../contrib;$PYTHONPATH"
29 $ PYTHONPATH="$TESTDIR/../contrib;$PYTHONPATH"
30 #else
30 #else
31 $ PYTHONPATH="$TESTDIR/../contrib:$PYTHONPATH"
31 $ PYTHONPATH="$TESTDIR/../contrib:$PYTHONPATH"
32 #endif
32 #endif
33 $ export PYTHONPATH
33 $ export PYTHONPATH
34
34
35 $ hg init server
35 $ hg init server
36 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
36 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
37
37
38 $ cat > $TESTTMP/debugprocessors.py <<EOF
38 $ cat > $TESTTMP/debugprocessors.py <<EOF
39 > from mercurial import (
39 > from mercurial import (
40 > cmdutil,
40 > cmdutil,
41 > commands,
41 > commands,
42 > pycompat,
42 > pycompat,
43 > registrar,
43 > registrar,
44 > )
44 > )
45 > cmdtable = {}
45 > cmdtable = {}
46 > command = registrar.command(cmdtable)
46 > command = registrar.command(cmdtable)
47 > @command(b'debugprocessors', [], b'FILE')
47 > @command(b'debugprocessors', [], b'FILE')
48 > def debugprocessors(ui, repo, file_=None, **opts):
48 > def debugprocessors(ui, repo, file_=None, **opts):
49 > opts = pycompat.byteskwargs(opts)
49 > opts = pycompat.byteskwargs(opts)
50 > opts[b'changelog'] = False
50 > opts[b'changelog'] = False
51 > opts[b'manifest'] = False
51 > opts[b'manifest'] = False
52 > opts[b'dir'] = False
52 > opts[b'dir'] = False
53 > rl = cmdutil.openrevlog(repo, b'debugprocessors', file_, opts)
53 > rl = cmdutil.openrevlog(repo, b'debugprocessors', file_, opts)
54 > for flag, proc in rl._flagprocessors.iteritems():
54 > for flag, proc in rl._flagprocessors.iteritems():
55 > ui.status(b"registered processor '%#x'\n" % (flag))
55 > ui.status(b"registered processor '%#x'\n" % (flag))
56 > EOF
56 > EOF
57
57
58 Skip the experimental.changegroup3=True config. Failure to agree on this comes
58 Skip the experimental.changegroup3=True config. Failure to agree on this comes
59 first, and causes a "ValueError: no common changegroup version" or "abort:
59 first, and causes a "ValueError: no common changegroup version" or "abort:
60 HTTP Error 500: Internal Server Error" if the extension is only loaded on one
60 HTTP Error 500: Internal Server Error" if the extension is only loaded on one
61 side. If that config *is* enabled and the extension is still only loaded on
61 side. If that config *is* enabled and the extension is still only loaded on
62 one side, the subsequent failure is "abort: missing processor for flag
62 one side, the subsequent failure is "abort: missing processor for flag
63 '0x2000'!" (possibly also masked by the Internal Server Error message).
63 '0x2000'!" (possibly also masked by the Internal Server Error message).
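For reference, the config being skipped would look like this in an hgrc; it is
shown for illustration only, since the point of this test is to leave it unset
on both sides:

  [experimental]
  changegroup3 = True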
64 $ cat >> $HGRCPATH <<EOF
64 $ cat >> $HGRCPATH <<EOF
65 > [extensions]
65 > [extensions]
66 > debugprocessors = $TESTTMP/debugprocessors.py
66 > debugprocessors = $TESTTMP/debugprocessors.py
67 > [experimental]
67 > [experimental]
68 > lfs.disableusercache = True
68 > lfs.disableusercache = True
69 > [lfs]
69 > [lfs]
70 > threshold=10
70 > threshold=10
71 > [web]
71 > [web]
72 > allow_push=*
72 > allow_push=*
73 > push_ssl=False
73 > push_ssl=False
74 > EOF
74 > EOF
75
75
76 $ cp $HGRCPATH $HGRCPATH.orig
76 $ cp $HGRCPATH $HGRCPATH.orig
77
77
78 #if lfsremote-on
78 #if lfsremote-on
79 $ hg --config extensions.lfs= -R server \
79 $ hg --config extensions.lfs= -R server \
80 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
80 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
81 #else
81 #else
82 $ hg --config extensions.lfs=! -R server \
82 $ hg --config extensions.lfs=! -R server \
83 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
83 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
84 #endif
84 #endif
85
85
86 $ cat hg.pid >> $DAEMON_PIDS
86 $ cat hg.pid >> $DAEMON_PIDS
87 $ hg clone -q http://localhost:$HGPORT client
87 $ hg clone -q http://localhost:$HGPORT client
88 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
88 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
89 [1]
89 [1]
90
90
91 This trivial repo will force the commandserver to load the extension, but not
91 This trivial repo will force the commandserver to load the extension, but not
92 call reposetup() on another repo actually being operated on. This gives
92 call reposetup() on another repo actually being operated on. This gives
93 coverage that wrapper functions do not assume reposetup() was called.
93 coverage that wrapper functions do not assume reposetup() was called.
94
94
95 $ hg init $TESTTMP/cmdservelfs
95 $ hg init $TESTTMP/cmdservelfs
96 $ cat >> $TESTTMP/cmdservelfs/.hg/hgrc << EOF
96 $ cat >> $TESTTMP/cmdservelfs/.hg/hgrc << EOF
97 > [extensions]
97 > [extensions]
98 > lfs =
98 > lfs =
99 > EOF
99 > EOF
100
100
101 --------------------------------------------------------------------------------
101 --------------------------------------------------------------------------------
102 Case #1: client with non-lfs content and the extension disabled; server with
102 Case #1: client with non-lfs content and the extension disabled; server with
103 non-lfs content, and the extension enabled.
103 non-lfs content, and the extension enabled.
104
104
105 $ cd client
105 $ cd client
106 $ echo 'non-lfs' > nonlfs.txt
106 $ echo 'non-lfs' > nonlfs.txt
107 >>> from __future__ import absolute_import
107 >>> from __future__ import absolute_import
108 >>> from hgclient import check, readchannel, runcommand
108 >>> from hgclient import check, readchannel, runcommand
109 >>> @check
109 >>> @check
110 ... def diff(server):
110 ... def diff(server):
111 ... readchannel(server)
111 ... readchannel(server)
112 ... # run an arbitrary command in the repo with the extension loaded
112 ... # run an arbitrary command in the repo with the extension loaded
113 ... runcommand(server, ['id', '-R', '../cmdservelfs'])
113 ... runcommand(server, ['id', '-R', '../cmdservelfs'])
114 ... # now run a command in a repo without the extension to ensure that
114 ... # now run a command in a repo without the extension to ensure that
115 ... # files are added safely..
115 ... # files are added safely..
116 ... runcommand(server, ['ci', '-Aqm', 'non-lfs'])
116 ... runcommand(server, ['ci', '-Aqm', 'non-lfs'])
117 ... # .. and that scmutil.prefetchfiles() safely no-ops..
117 ... # .. and that scmutil.prefetchfiles() safely no-ops..
118 ... runcommand(server, ['diff', '-r', '.~1'])
118 ... runcommand(server, ['diff', '-r', '.~1'])
119 ... # .. and that debugupgraderepo safely no-ops.
119 ... # .. and that debugupgraderepo safely no-ops.
120 ... runcommand(server, ['debugupgraderepo', '-q', '--run'])
120 ... runcommand(server, ['debugupgraderepo', '-q', '--run'])
121 *** runcommand id -R ../cmdservelfs
121 *** runcommand id -R ../cmdservelfs
122 000000000000 tip
122 000000000000 tip
123 *** runcommand ci -Aqm non-lfs
123 *** runcommand ci -Aqm non-lfs
124 *** runcommand diff -r .~1
124 *** runcommand diff -r .~1
125 diff -r 000000000000 nonlfs.txt
125 diff -r 000000000000 nonlfs.txt
126 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
126 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
127 +++ b/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
127 +++ b/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
128 @@ -0,0 +1,1 @@
128 @@ -0,0 +1,1 @@
129 +non-lfs
129 +non-lfs
130 *** runcommand debugupgraderepo -q --run
130 *** runcommand debugupgraderepo -q --run
131 upgrade will perform the following actions:
131 upgrade will perform the following actions:
132
132
133 requirements
133 requirements
134 preserved: dotencode, fncache, generaldelta, revlogv1, store
134 preserved: dotencode, fncache, generaldelta, revlogv1, store
135
135
136 beginning upgrade...
136 beginning upgrade...
137 repository locked and read-only
137 repository locked and read-only
138 creating temporary repository to stage migrated data: * (glob)
138 creating temporary repository to stage migrated data: * (glob)
139 (it is safe to interrupt this process any time before data migration completes)
139 (it is safe to interrupt this process any time before data migration completes)
140 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
140 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
141 migrating 324 bytes in store; 129 bytes tracked data
141 migrating 324 bytes in store; 129 bytes tracked data
142 migrating 1 filelogs containing 1 revisions (73 bytes in store; 8 bytes tracked data)
142 migrating 1 filelogs containing 1 revisions (73 bytes in store; 8 bytes tracked data)
143 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
143 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
144 migrating 1 manifests containing 1 revisions (117 bytes in store; 52 bytes tracked data)
144 migrating 1 manifests containing 1 revisions (117 bytes in store; 52 bytes tracked data)
145 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
145 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
146 migrating changelog containing 1 revisions (134 bytes in store; 69 bytes tracked data)
146 migrating changelog containing 1 revisions (134 bytes in store; 69 bytes tracked data)
147 finished migrating 1 changelog revisions; change in size: 0 bytes
147 finished migrating 1 changelog revisions; change in size: 0 bytes
148 finished migrating 3 total revisions; total change in store size: 0 bytes
148 finished migrating 3 total revisions; total change in store size: 0 bytes
149 copying phaseroots
149 copying phaseroots
150 data fully migrated to temporary repository
150 data fully migrated to temporary repository
151 marking source repository as being upgraded; clients will be unable to read from repository
151 marking source repository as being upgraded; clients will be unable to read from repository
152 starting in-place swap of repository data
152 starting in-place swap of repository data
153 replaced files will be backed up at * (glob)
153 replaced files will be backed up at * (glob)
154 replacing store...
154 replacing store...
155 store replacement complete; repository was inconsistent for *s (glob)
155 store replacement complete; repository was inconsistent for *s (glob)
156 finalizing requirements file and making repository readable again
156 finalizing requirements file and making repository readable again
157 removing temporary repository * (glob)
157 removing temporary repository * (glob)
158 copy of old repository backed up at * (glob)
158 copy of old repository backed up at * (glob)
159 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
159 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
160
160
161 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
161 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
162 [1]
162 [1]
163
163
164 #if lfsremote-on
164 #if lfsremote-on
165
165
166 $ hg push -q
166 $ hg push -q
167 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
167 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
168 [1]
168 [1]
169
169
170 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
170 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
171 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
171 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
172 [1]
172 [1]
173
173
174 $ hg init $TESTTMP/client1_pull
174 $ hg init $TESTTMP/client1_pull
175 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
175 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
176 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
176 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
177 [1]
177 [1]
178
178
179 $ hg identify http://localhost:$HGPORT
179 $ hg identify http://localhost:$HGPORT
180 d437e1d24fbd
180 d437e1d24fbd
181
181
182 #endif
182 #endif
183
183
184 --------------------------------------------------------------------------------
184 --------------------------------------------------------------------------------
185 Case #2: client with non-lfs content and the extension enabled; server with
185 Case #2: client with non-lfs content and the extension enabled; server with
186 non-lfs content, and the extension state controlled by #testcases.
186 non-lfs content, and the extension state controlled by #testcases.
187
187
188 $ cat >> $HGRCPATH <<EOF
188 $ cat >> $HGRCPATH <<EOF
189 > [extensions]
189 > [extensions]
190 > lfs =
190 > lfs =
191 > EOF
191 > EOF
192 $ echo 'non-lfs' > nonlfs2.txt
192 $ echo 'non-lfs' > nonlfs2.txt
193 $ hg ci -Aqm 'non-lfs file with lfs client'
193 $ hg ci -Aqm 'non-lfs file with lfs client'
194
194
195 Since no lfs content has been added yet, the push is allowed, even when the
195 Since no lfs content has been added yet, the push is allowed, even when the
196 extension is not enabled remotely.
196 extension is not enabled remotely.
197
197
198 $ hg push -q
198 $ hg push -q
199 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
199 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
200 [1]
200 [1]
201
201
202 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
202 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
203 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
203 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
204 [1]
204 [1]
205
205
206 $ hg init $TESTTMP/client2_pull
206 $ hg init $TESTTMP/client2_pull
207 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
207 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
208 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
208 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
209 [1]
209 [1]
210
210
211 $ hg identify http://localhost:$HGPORT
211 $ hg identify http://localhost:$HGPORT
212 1477875038c6
212 1477875038c6
213
213
214 --------------------------------------------------------------------------------
214 --------------------------------------------------------------------------------
215 Case #3: client with lfs content and the extension enabled; server with
215 Case #3: client with lfs content and the extension enabled; server with
216 non-lfs content, and the extension state controlled by #testcases. The server
216 non-lfs content, and the extension state controlled by #testcases. The server
217 should have an 'lfs' requirement after it picks up its first commit with a blob.
217 should have an 'lfs' requirement after it picks up its first commit with a blob.
218
218
219 $ echo 'this is a big lfs file' > lfs.bin
219 $ echo 'this is a big lfs file' > lfs.bin
220 $ hg ci -Aqm 'lfs'
220 $ hg ci -Aqm 'lfs'
221 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
221 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
222 .hg/requires:lfs
222 .hg/requires:lfs
223
223
224 #if lfsremote-off
224 #if lfsremote-off
225 $ hg push -q
225 $ hg push -q
226 abort: required features are not supported in the destination: lfs
226 abort: required features are not supported in the destination: lfs
227 (enable the lfs extension on the server)
227 (enable the lfs extension on the server)
228 [255]
228 [255]
229 #else
229 #else
230 $ hg push -q
230 $ hg push -q
231 #endif
231 #endif
232 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
232 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
233 .hg/requires:lfs
233 .hg/requires:lfs
234 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
234 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
235
235
236 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
236 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
237 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
237 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
238 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
238 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
239 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
239 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
240
240
241 $ hg init $TESTTMP/client3_pull
241 $ hg init $TESTTMP/client3_pull
242 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
242 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
243 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
243 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
244 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
244 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
245 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
245 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
246
246
247 Test that the commit/changegroup requirement check hook can be run multiple
247 Test that the commit/changegroup requirement check hook can be run multiple
248 times.
248 times.
249
249
250 $ hg clone -qr 0 http://localhost:$HGPORT $TESTTMP/cmdserve_client3
250 $ hg clone -qr 0 http://localhost:$HGPORT $TESTTMP/cmdserve_client3
251
251
252 $ cd ../cmdserve_client3
252 $ cd ../cmdserve_client3
253
253
254 >>> from __future__ import absolute_import
254 >>> from __future__ import absolute_import
255 >>> from hgclient import check, readchannel, runcommand
255 >>> from hgclient import check, readchannel, runcommand
256 >>> @check
256 >>> @check
257 ... def addrequirement(server):
257 ... def addrequirement(server):
258 ... readchannel(server)
258 ... readchannel(server)
259 ... # change the repo in a way that adds the lfs requirement
259 ... # change the repo in a way that adds the lfs requirement
260 ... runcommand(server, ['pull', '-qu'])
260 ... runcommand(server, ['pull', '-qu'])
261 ... # Now cause the requirement adding hook to fire again, without going
261 ... # Now cause the requirement adding hook to fire again, without going
262 ... # through reposetup() again.
262 ... # through reposetup() again.
263 ... with open('file.txt', 'wb') as fp:
263 ... with open('file.txt', 'wb') as fp:
264 ... fp.write('data')
264 ... fp.write('data')
265 ... runcommand(server, ['ci', '-Aqm', 'non-lfs'])
265 ... runcommand(server, ['ci', '-Aqm', 'non-lfs'])
266 *** runcommand pull -qu
266 *** runcommand pull -qu
267 *** runcommand ci -Aqm non-lfs
267 *** runcommand ci -Aqm non-lfs
268
268
269 $ cd ../client
269 $ cd ../client
270
270
271 The difference here is that the push above failed when the extension isn't
271 The difference here is that the push above failed when the extension isn't
272 enabled on the server.
272 enabled on the server.
273 $ hg identify http://localhost:$HGPORT
273 $ hg identify http://localhost:$HGPORT
274 8374dc4052cb (lfsremote-on !)
274 8374dc4052cb (lfsremote-on !)
275 1477875038c6 (lfsremote-off !)
275 1477875038c6 (lfsremote-off !)
276
276
277 Don't bother testing the lfsremote-off cases: the server won't be able
277 Don't bother testing the lfsremote-off cases: the server won't be able
278 to launch if there's lfs content and the extension is disabled.
278 to launch if there's lfs content and the extension is disabled.
279
279
280 #if lfsremote-on
280 #if lfsremote-on
281
281
282 --------------------------------------------------------------------------------
282 --------------------------------------------------------------------------------
283 Case #4: client with non-lfs content and the extension disabled; server with
283 Case #4: client with non-lfs content and the extension disabled; server with
284 lfs content, and the extension enabled.
284 lfs content, and the extension enabled.
285
285
286 $ cat >> $HGRCPATH <<EOF
286 $ cat >> $HGRCPATH <<EOF
287 > [extensions]
287 > [extensions]
288 > lfs = !
288 > lfs = !
289 > EOF
289 > EOF
290
290
291 $ hg init $TESTTMP/client4
291 $ hg init $TESTTMP/client4
292 $ cd $TESTTMP/client4
292 $ cd $TESTTMP/client4
293 $ cat >> .hg/hgrc <<EOF
293 $ cat >> .hg/hgrc <<EOF
294 > [paths]
294 > [paths]
295 > default = http://localhost:$HGPORT
295 > default = http://localhost:$HGPORT
296 > EOF
296 > EOF
297 $ echo 'non-lfs' > nonlfs2.txt
297 $ echo 'non-lfs' > nonlfs2.txt
298 $ hg ci -Aqm 'non-lfs'
298 $ hg ci -Aqm 'non-lfs'
299 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
299 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
300 $TESTTMP/server/.hg/requires:lfs
300 $TESTTMP/server/.hg/requires:lfs
301
301
302 $ hg push -q --force
302 $ hg push -q --force
303 warning: repository is unrelated
303 warning: repository is unrelated
304 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
304 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
305 $TESTTMP/server/.hg/requires:lfs
305 $TESTTMP/server/.hg/requires:lfs
306
306
307 TODO: fail more gracefully.
307 $ hg clone http://localhost:$HGPORT $TESTTMP/client4_clone
308
308 (remote is using large file support (lfs), but it is explicitly disabled in the local configuration)
309 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client4_clone
309 abort: repository requires features unknown to this Mercurial: lfs!
310 abort: HTTP Error 500: Internal Server Error
310 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
311 [255]
311 [255]
312 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
312 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
313 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
313 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
314 $TESTTMP/server/.hg/requires:lfs
314 $TESTTMP/server/.hg/requires:lfs
315 [2]
315 [2]
316
316
317 TODO: fail more gracefully.
317 TODO: fail more gracefully.
318
318
319 $ hg init $TESTTMP/client4_pull
319 $ hg init $TESTTMP/client4_pull
320 $ hg -R $TESTTMP/client4_pull pull -q http://localhost:$HGPORT
320 $ hg -R $TESTTMP/client4_pull pull -q http://localhost:$HGPORT
321 abort: HTTP Error 500: Internal Server Error
321 abort: HTTP Error 500: Internal Server Error
322 [255]
322 [255]
323 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
323 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
324 $TESTTMP/server/.hg/requires:lfs
324 $TESTTMP/server/.hg/requires:lfs
325
325
326 $ hg identify http://localhost:$HGPORT
326 $ hg identify http://localhost:$HGPORT
327 03b080fa9d93
327 03b080fa9d93
328
328
329 --------------------------------------------------------------------------------
329 --------------------------------------------------------------------------------
330 Case #5: client with non-lfs content and the extension enabled; server with
330 Case #5: client with non-lfs content and the extension enabled; server with
331 lfs content, and the extension enabled.
331 lfs content, and the extension enabled.
332
332
333 $ cat >> $HGRCPATH <<EOF
333 $ cat >> $HGRCPATH <<EOF
334 > [extensions]
334 > [extensions]
335 > lfs =
335 > lfs =
336 > EOF
336 > EOF
337 $ echo 'non-lfs' > nonlfs3.txt
337 $ echo 'non-lfs' > nonlfs3.txt
338 $ hg ci -Aqm 'non-lfs file with lfs client'
338 $ hg ci -Aqm 'non-lfs file with lfs client'
339
339
340 $ hg push -q
340 $ hg push -q
341 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
341 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
342 $TESTTMP/server/.hg/requires:lfs
342 $TESTTMP/server/.hg/requires:lfs
343
343
344 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
344 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
345 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
345 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
346 $TESTTMP/client5_clone/.hg/requires:lfs
346 $TESTTMP/client5_clone/.hg/requires:lfs
347 $TESTTMP/server/.hg/requires:lfs
347 $TESTTMP/server/.hg/requires:lfs
348
348
349 $ hg init $TESTTMP/client5_pull
349 $ hg init $TESTTMP/client5_pull
350 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
350 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
351 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
351 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
352 $TESTTMP/client5_pull/.hg/requires:lfs
352 $TESTTMP/client5_pull/.hg/requires:lfs
353 $TESTTMP/server/.hg/requires:lfs
353 $TESTTMP/server/.hg/requires:lfs
354
354
  $ hg identify http://localhost:$HGPORT
  c729025cc5e3

  $ mv $HGRCPATH $HGRCPATH.tmp
  $ cp $HGRCPATH.orig $HGRCPATH

  >>> from __future__ import absolute_import
  >>> from hgclient import check, readchannel, runcommand
  >>> @check
  ... def checkflags(server):
  ...     readchannel(server)
  ...     print('')
  ...     print('# LFS required- both lfs and non-lfs revlogs have 0x2000 flag')
  ...     runcommand(server, ['debugprocessors', 'lfs.bin', '-R',
  ...                '../server'])
  ...     runcommand(server, ['debugprocessors', 'nonlfs2.txt', '-R',
  ...                '../server'])
  ...     runcommand(server, ['config', 'extensions', '--cwd',
  ...                '../server'])
  ...
  ...     print("\n# LFS not enabled- revlogs don't have 0x2000 flag")
  ...     runcommand(server, ['debugprocessors', 'nonlfs3.txt'])
  ...     runcommand(server, ['config', 'extensions'])

  # LFS required- both lfs and non-lfs revlogs have 0x2000 flag
  *** runcommand debugprocessors lfs.bin -R ../server
  registered processor '0x8000'
  registered processor '0x2000'
  *** runcommand debugprocessors nonlfs2.txt -R ../server
  registered processor '0x8000'
  registered processor '0x2000'
  *** runcommand config extensions --cwd ../server
  extensions.debugprocessors=$TESTTMP/debugprocessors.py
  extensions.lfs=

  # LFS not enabled- revlogs don't have 0x2000 flag
  *** runcommand debugprocessors nonlfs3.txt
  registered processor '0x8000'
  *** runcommand config extensions
  extensions.debugprocessors=$TESTTMP/debugprocessors.py

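The two processor values printed above are revlog flag bits, and their
arithmetic is easy to verify (a minimal sketch, not run by the test; the
constant names follow mercurial.revlog at this point in history):

  REVIDX_ISCENSORED = 1 << 15   # 0x8000, registered for every repository
  REVIDX_EXTSTORED = 1 << 13    # 0x2000, registered only once lfs loads
  assert hex(REVIDX_ISCENSORED) == '0x8000'
  assert hex(REVIDX_EXTSTORED) == '0x2000'
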
  $ rm $HGRCPATH
  $ mv $HGRCPATH.tmp $HGRCPATH

  $ hg clone $TESTTMP/client $TESTTMP/nonlfs -qr 0 --config extensions.lfs=
  $ cat >> $TESTTMP/nonlfs/.hg/hgrc <<EOF
  > [extensions]
  > lfs = !
  > EOF

  >>> from __future__ import absolute_import, print_function
  >>> from hgclient import check, readchannel, runcommand
  >>> @check
  ... def checkflags2(server):
  ...     readchannel(server)
  ...     print('')
  ...     print('# LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag')
  ...     runcommand(server, ['debugprocessors', 'lfs.bin', '-R',
  ...                '../server'])
  ...     runcommand(server, ['debugprocessors', 'nonlfs2.txt', '-R',
  ...                '../server'])
  ...     runcommand(server, ['config', 'extensions', '--cwd',
  ...                '../server'])
  ...
  ...     print('\n# LFS enabled without requirement- revlogs have 0x2000 flag')
  ...     runcommand(server, ['debugprocessors', 'nonlfs3.txt'])
  ...     runcommand(server, ['config', 'extensions'])
  ...
  ...     print("\n# LFS disabled locally- revlogs don't have 0x2000 flag")
  ...     runcommand(server, ['debugprocessors', 'nonlfs.txt', '-R',
  ...                '../nonlfs'])
  ...     runcommand(server, ['config', 'extensions', '--cwd',
  ...                '../nonlfs'])

  # LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
  *** runcommand debugprocessors lfs.bin -R ../server
  registered processor '0x8000'
  registered processor '0x2000'
  *** runcommand debugprocessors nonlfs2.txt -R ../server
  registered processor '0x8000'
  registered processor '0x2000'
  *** runcommand config extensions --cwd ../server
  extensions.debugprocessors=$TESTTMP/debugprocessors.py
  extensions.lfs=

  # LFS enabled without requirement- revlogs have 0x2000 flag
  *** runcommand debugprocessors nonlfs3.txt
  registered processor '0x8000'
  registered processor '0x2000'
  *** runcommand config extensions
  extensions.debugprocessors=$TESTTMP/debugprocessors.py
  extensions.lfs=

  # LFS disabled locally- revlogs don't have 0x2000 flag
  *** runcommand debugprocessors nonlfs.txt -R ../nonlfs
  registered processor '0x8000'
  *** runcommand config extensions --cwd ../nonlfs
  extensions.debugprocessors=$TESTTMP/debugprocessors.py
  extensions.lfs=!

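The 0x2000 processor appears exactly when the extension loads for the
repository in question. Conceptually the registration goes through the
revlog flag-processor API, roughly like this (an illustrative sketch, not
the extension's verbatim setup code; the three handlers live in the
extension's wrapper module):

  from mercurial import revlog

  # (read, write, raw-check) transforms applied to revisions carrying the
  # REVIDX_EXTSTORED (0x2000) flag
  revlog.addflagprocessor(revlog.REVIDX_EXTSTORED,
                          (readfromstore, writetostore, bypasscheckhash))
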
--------------------------------------------------------------------------------
Case #6: client with lfs content and the extension enabled; server with
lfs content, and the extension enabled.

  $ echo 'this is another lfs file' > lfs2.txt
  $ hg ci -Aqm 'lfs file with lfs client'

  $ hg --config paths.default= push -v http://localhost:$HGPORT
  pushing to http://localhost:$HGPORT/
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  searching for changes
  remote has heads on branch 'default' that are not known locally: 8374dc4052cb
  lfs: uploading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
  lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
  lfs: uploaded 1 files (25 bytes)
  1 changesets found
  uncompressed size of bundle content:
       206 (changelog)
       172 (manifests)
       275  lfs2.txt
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  $ grep 'lfs' .hg/requires $SERVER_REQUIRES
  .hg/requires:lfs
  $TESTTMP/server/.hg/requires:lfs

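The 25 bytes uploaded above are lfs2.txt's contents, addressed by their
sha256. What actually lands in the filelog is a Git LFS pointer; for this
blob it would read as follows (an illustrative rendering of the spec
format, not test output):

  version https://git-lfs.github.com/spec/v1
  oid sha256:a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
  size 25
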
  $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
  $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
  $TESTTMP/client6_clone/.hg/requires:lfs
  $TESTTMP/server/.hg/requires:lfs

  $ hg init $TESTTMP/client6_pull
  $ hg -R $TESTTMP/client6_pull pull -u -v http://localhost:$HGPORT
  pulling from http://localhost:$HGPORT/
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 6 changesets with 5 changes to 5 files (+1 heads)
  calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
  new changesets d437e1d24fbd:d3b84d50eacb
  resolving manifests
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
  lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
  lfs: downloaded 1 files (25 bytes)
  getting lfs2.txt
  lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
  getting nonlfs2.txt
  getting nonlfs3.txt
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  updated to "d3b84d50eacb: lfs file with lfs client"
  1 other heads for branch "default"
  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
  $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
  $TESTTMP/client6_pull/.hg/requires:lfs
  $TESTTMP/server/.hg/requires:lfs

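The pretxnchangegroup hook named in the verbose output is what turns an
incoming lfs revision into a local repository requirement. Schematically
(a sketch of the idea, not the hook's verbatim body; `incominglfsrevs` is
a hypothetical helper standing in for the real flag scan):

  def checkrequireslfs(ui, repo, **kwargs):
      # runs before the transaction commits, so the requirement and the
      # lfs data arrive atomically
      if 'lfs' not in repo.requirements and incominglfsrevs(repo, kwargs):
          repo.requirements.add('lfs')
          repo._writerequirements()
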
  $ hg identify http://localhost:$HGPORT
  d3b84d50eacb

--------------------------------------------------------------------------------
Misc: process dies early if a requirement exists and the extension is disabled

  $ hg --config extensions.lfs=! summary
  abort: repository requires features unknown to this Mercurial: lfs!
  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
  [255]

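That abort comes from the generic requirements gate that runs before any
repository is opened; conceptually (a sketch of the check, not the exact
Mercurial source):

  from mercurial import error
  from mercurial.i18n import _

  def checkrequirements(requirements, supported):
      # any requirement we cannot honor must stop the process up front
      missing = requirements - supported
      if missing:
          raise error.RequirementError(
              _('repository requires features unknown to this Mercurial: %s')
              % ' '.join(sorted(missing)))
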
  $ echo 'this is an lfs file' > $TESTTMP/client6_clone/lfspair1.bin
  $ echo 'this is an lfs file too' > $TESTTMP/client6_clone/lfspair2.bin
  $ hg -R $TESTTMP/client6_clone ci -Aqm 'add lfs pair'
  $ hg -R $TESTTMP/client6_clone push -q

  $ hg clone -qU http://localhost:$HGPORT $TESTTMP/bulkfetch

Export will prefetch all needed files across all needed revisions

  $ hg -R $TESTTMP/bulkfetch -v export -r 0:tip -o all.export
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  exporting patches:
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  lfs: need to transfer 4 objects (92 bytes)
  lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
  lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
  lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
  lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
  lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
  lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
  lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
  lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
  lfs: downloaded 4 files (92 bytes)
  all.export
  lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
  lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
  lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
  lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store

Export with selected files is used with `extdiff --patch`

  $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
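
Removing .hg/store/lfs empties the local blob cache, so the next read has
to hit the remote store again. The cache lays blobs out by oid (a sketch
of the path scheme, assuming the usual two-hex-digit sharding):

  oid = 'd96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e'
  path = '.hg/store/lfs/objects/%s/%s' % (oid[:2], oid[2:])
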
  $ hg --config extensions.extdiff= \
  > -R $TESTTMP/bulkfetch -v extdiff -r 2:tip --patch $TESTTMP/bulkfetch/lfs.bin
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
  lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
  lfs: downloaded 1 files (23 bytes)
  */hg-8374dc4052cb.patch (glob)
  lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
  */hg-9640b57e77b1.patch (glob)
  --- */hg-8374dc4052cb.patch * (glob)
  +++ */hg-9640b57e77b1.patch * (glob)
  @@ -2,12 +2,7 @@
   # User test
   # Date 0 0
   #      Thu Jan 01 00:00:00 1970 +0000
  -# Node ID 8374dc4052cbd388e79d9dc4ddb29784097aa354
  -# Parent  1477875038c60152e391238920a16381c627b487
  -lfs
  +# Node ID 9640b57e77b14c3a0144fb4478b6cc13e13ea0d1
  +# Parent  d3b84d50eacbd56638e11abce6b8616aaba54420
  +add lfs pair

  -diff -r 1477875038c6 -r 8374dc4052cb lfs.bin
  ---- /dev/null Thu Jan 01 00:00:00 1970 +0000
  -+++ b/lfs.bin Thu Jan 01 00:00:00 1970 +0000
  -@@ -0,0 +1,1 @@
  -+this is a big lfs file
  cleaning up temp directory
  [1]

Diff will prefetch files

  $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
  $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  lfs: need to transfer 4 objects (92 bytes)
  lfs: downloading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
  lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
  lfs: downloading bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc (23 bytes)
  lfs: processed: bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc
  lfs: downloading cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 (20 bytes)
  lfs: processed: cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782
  lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
  lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
  lfs: downloaded 4 files (92 bytes)
  lfs: found bed80f00180ac404b843628ab56a1c1984d6145c391cd1628a7dd7d2598d71fc in the local lfs store
  lfs: found a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de in the local lfs store
  lfs: found cf1b2787b74e66547d931b6ebe28ff63303e803cb2baa14a8f57c4383d875782 in the local lfs store
  lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
  diff -r 8374dc4052cb -r 9640b57e77b1 lfs.bin
  --- a/lfs.bin Thu Jan 01 00:00:00 1970 +0000
  +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
  @@ -1,1 +0,0 @@
  -this is a big lfs file
  diff -r 8374dc4052cb -r 9640b57e77b1 lfs2.txt
  --- /dev/null Thu Jan 01 00:00:00 1970 +0000
  +++ b/lfs2.txt Thu Jan 01 00:00:00 1970 +0000
  @@ -0,0 +1,1 @@
  +this is another lfs file
  diff -r 8374dc4052cb -r 9640b57e77b1 lfspair1.bin
  --- /dev/null Thu Jan 01 00:00:00 1970 +0000
  +++ b/lfspair1.bin Thu Jan 01 00:00:00 1970 +0000
  @@ -0,0 +1,1 @@
  +this is an lfs file
  diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
  --- /dev/null Thu Jan 01 00:00:00 1970 +0000
  +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
  @@ -0,0 +1,1 @@
  +this is an lfs file too
  diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs.txt
  --- a/nonlfs.txt Thu Jan 01 00:00:00 1970 +0000
  +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
  @@ -1,1 +0,0 @@
  -non-lfs
  diff -r 8374dc4052cb -r 9640b57e77b1 nonlfs3.txt
  --- /dev/null Thu Jan 01 00:00:00 1970 +0000
  +++ b/nonlfs3.txt Thu Jan 01 00:00:00 1970 +0000
  @@ -0,0 +1,1 @@
  +non-lfs

Only the files required by diff are prefetched

  $ rm -r $TESTTMP/bulkfetch/.hg/store/lfs
  $ hg -R $TESTTMP/bulkfetch -v diff -r 2:tip $TESTTMP/bulkfetch/lfspair2.bin
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
  lfs: downloading d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e (24 bytes)
  lfs: processed: d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e
  lfs: downloaded 1 files (24 bytes)
  lfs: found d96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e in the local lfs store
  diff -r 8374dc4052cb -r 9640b57e77b1 lfspair2.bin
  --- /dev/null Thu Jan 01 00:00:00 1970 +0000
  +++ b/lfspair2.bin Thu Jan 01 00:00:00 1970 +0000
  @@ -0,0 +1,1 @@
  +this is an lfs file too

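Only lfspair2.bin's blob was fetched this time. An lfs oid is simply the
sha256 of the file contents, so the 24-byte figure is easy to confirm
independently (a minimal sketch using the content written above, not run
by the test):

  import hashlib

  data = b'this is an lfs file too\n'   # echo appended the newline
  assert len(data) == 24
  assert hashlib.sha256(data).hexdigest() == (
      'd96eda2c74b56e95cfb5ffb66b6503e198cc6fc4a09dc877de925feebc65786e')
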
#endif

  $ "$PYTHON" $TESTDIR/killdaemons.py $DAEMON_PIDS

#if lfsremote-on
  $ cat $TESTTMP/errors.log | grep '^[A-Z]'
  Traceback (most recent call last):
  ValueError: no common changegroup version
  Traceback (most recent call last):
  ValueError: no common changegroup version
#else
  $ cat $TESTTMP/errors.log
#endif