py3: use '%d' for integers instead of '%s'...
Pulkit Goyal
r37597:ce566e0f default
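Context for the change, sketched as a standalone illustration (this snippet is not part of the patch itself): on Python 3, bytes interpolation only accepts integers via '%d', whereas Python 2's string formatting also accepted them with '%s'. Both hunks below swap '%s' for '%d' where the operand is an int.

# Illustrative sketch of the py2/py3 difference this patch addresses
# (standalone Python, not code from the patch):
#
#   Python 2: '%s' % 5    -> '5'       (ints happened to work with %s)
#   Python 3: b'%s' % 5   -> TypeError (%b requires a bytes-like object)
#   Python 3: b'%d' % 5   -> b'5'      (ints need %d under bytes formatting)

version, i = b'a1b2c3', 0
rev = b'%s^%d' % (version, i + 1)  # the pattern used in getchangedfiles() below
assert rev == b'a1b2c3^1'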
@@ -1,478 +1,478 @@
# git.py - git support for the convert extension
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import os

from mercurial.i18n import _
from mercurial import (
    config,
    error,
    node as nodemod,
)

from . import (
    common,
)

class submodule(object):
    def __init__(self, path, node, url):
        self.path = path
        self.node = node
        self.url = url

    def hgsub(self):
        return "%s = [git]%s" % (self.path, self.url)

    def hgsubstate(self):
        return "%s %s" % (self.node, self.path)

# Keys in extra fields that should not be copied if the user requests.
bannedextrakeys = {
    # Git commit object built-ins.
    'tree',
    'parent',
    'author',
    'committer',
    # Mercurial built-ins.
    'branch',
    'close',
}

class convert_git(common.converter_source, common.commandline):
    # Windows does not support GIT_DIR= construct while other systems
    # cannot remove environment variable. Just assume none have
    # both issues.

    def _gitcmd(self, cmd, *args, **kwargs):
        return cmd('--git-dir=%s' % self.path, *args, **kwargs)

    def gitrun0(self, *args, **kwargs):
        return self._gitcmd(self.run0, *args, **kwargs)

    def gitrun(self, *args, **kwargs):
        return self._gitcmd(self.run, *args, **kwargs)

    def gitrunlines0(self, *args, **kwargs):
        return self._gitcmd(self.runlines0, *args, **kwargs)

    def gitrunlines(self, *args, **kwargs):
        return self._gitcmd(self.runlines, *args, **kwargs)

    def gitpipe(self, *args, **kwargs):
        return self._gitcmd(self._run3, *args, **kwargs)

    def __init__(self, ui, repotype, path, revs=None):
        super(convert_git, self).__init__(ui, repotype, path, revs=revs)
        common.commandline.__init__(self, ui, 'git')

        # Pass an absolute path to git to prevent from ever being interpreted
        # as a URL
        path = os.path.abspath(path)

        if os.path.isdir(path + "/.git"):
            path += "/.git"
        if not os.path.exists(path + "/objects"):
            raise common.NoRepo(_("%s does not look like a Git repository") %
                                path)

        # The default value (50) is based on the default for 'git diff'.
        similarity = ui.configint('convert', 'git.similarity')
        if similarity < 0 or similarity > 100:
            raise error.Abort(_('similarity must be between 0 and 100'))
        if similarity > 0:
            self.simopt = ['-C%d%%' % similarity]
            findcopiesharder = ui.configbool('convert', 'git.findcopiesharder')
            if findcopiesharder:
                self.simopt.append('--find-copies-harder')

            renamelimit = ui.configint('convert', 'git.renamelimit')
            self.simopt.append('-l%d' % renamelimit)
        else:
            self.simopt = []

        common.checktool('git', 'git')

        self.path = path
        self.submodules = []

        self.catfilepipe = self.gitpipe('cat-file', '--batch')

        self.copyextrakeys = self.ui.configlist('convert', 'git.extrakeys')
        banned = set(self.copyextrakeys) & bannedextrakeys
        if banned:
            raise error.Abort(_('copying of extra key is forbidden: %s') %
                              _(', ').join(sorted(banned)))

        committeractions = self.ui.configlist('convert', 'git.committeractions')

        messagedifferent = None
        messagealways = None
        for a in committeractions:
            if a.startswith(('messagedifferent', 'messagealways')):
                k = a
                v = None
                if '=' in a:
                    k, v = a.split('=', 1)

                if k == 'messagedifferent':
                    messagedifferent = v or 'committer:'
                elif k == 'messagealways':
                    messagealways = v or 'committer:'

        if messagedifferent and messagealways:
            raise error.Abort(_('committeractions cannot define both '
                                'messagedifferent and messagealways'))

        dropcommitter = 'dropcommitter' in committeractions
        replaceauthor = 'replaceauthor' in committeractions

        if dropcommitter and replaceauthor:
            raise error.Abort(_('committeractions cannot define both '
                                'dropcommitter and replaceauthor'))

        if dropcommitter and messagealways:
            raise error.Abort(_('committeractions cannot define both '
                                'dropcommitter and messagealways'))

        if not messagedifferent and not messagealways:
            messagedifferent = 'committer:'

        self.committeractions = {
            'dropcommitter': dropcommitter,
            'replaceauthor': replaceauthor,
            'messagedifferent': messagedifferent,
            'messagealways': messagealways,
        }

    def after(self):
        for f in self.catfilepipe:
            f.close()

    def getheads(self):
        if not self.revs:
            output, status = self.gitrun('rev-parse', '--branches', '--remotes')
            heads = output.splitlines()
            if status:
                raise error.Abort(_('cannot retrieve git heads'))
        else:
            heads = []
            for rev in self.revs:
                rawhead, ret = self.gitrun('rev-parse', '--verify', rev)
                heads.append(rawhead[:-1])
                if ret:
                    raise error.Abort(_('cannot retrieve git head "%s"') % rev)
        return heads

    def catfile(self, rev, ftype):
        if rev == nodemod.nullhex:
            raise IOError
        self.catfilepipe[0].write(rev+'\n')
        self.catfilepipe[0].flush()
        info = self.catfilepipe[1].readline().split()
        if info[1] != ftype:
            raise error.Abort(_('cannot read %r object at %s') % (ftype, rev))
        size = int(info[2])
        data = self.catfilepipe[1].read(size)
        if len(data) < size:
            raise error.Abort(_('cannot read %r object at %s: unexpected size')
                              % (ftype, rev))
        # read the trailing newline
        self.catfilepipe[1].read(1)
        return data

    def getfile(self, name, rev):
        if rev == nodemod.nullhex:
            return None, None
        if name == '.hgsub':
            data = '\n'.join([m.hgsub() for m in self.submoditer()])
            mode = ''
        elif name == '.hgsubstate':
            data = '\n'.join([m.hgsubstate() for m in self.submoditer()])
            mode = ''
        else:
            data = self.catfile(rev, "blob")
            mode = self.modecache[(name, rev)]
        return data, mode

    def submoditer(self):
        null = nodemod.nullhex
        for m in sorted(self.submodules, key=lambda p: p.path):
            if m.node != null:
                yield m

    def parsegitmodules(self, content):
        """Parse the formatted .gitmodules file, example file format:
        [submodule "sub"]\n
        \tpath = sub\n
        \turl = git://giturl\n
        """
        self.submodules = []
        c = config.config()
        # Each item in .gitmodules starts with whitespace that cant be parsed
        c.parse('.gitmodules', '\n'.join(line.strip() for line in
                               content.split('\n')))
        for sec in c.sections():
            s = c[sec]
            if 'url' in s and 'path' in s:
                self.submodules.append(submodule(s['path'], '', s['url']))

    def retrievegitmodules(self, version):
        modules, ret = self.gitrun('show', '%s:%s' % (version, '.gitmodules'))
        if ret:
            # This can happen if a file is in the repo that has permissions
            # 160000, but there is no .gitmodules file.
            self.ui.warn(_("warning: cannot read submodules config file in "
                           "%s\n") % version)
            return

        try:
            self.parsegitmodules(modules)
        except error.ParseError:
            self.ui.warn(_("warning: unable to parse .gitmodules in %s\n")
                         % version)
            return

        for m in self.submodules:
            node, ret = self.gitrun('rev-parse', '%s:%s' % (version, m.path))
            if ret:
                continue
            m.node = node.strip()

    def getchanges(self, version, full):
        if full:
            raise error.Abort(_("convert from git does not support --full"))
        self.modecache = {}
        cmd = ['diff-tree','-z', '--root', '-m', '-r'] + self.simopt + [version]
        output, status = self.gitrun(*cmd)
        if status:
            raise error.Abort(_('cannot read changes in %s') % version)
        changes = []
        copies = {}
        seen = set()
        entry = None
        subexists = [False]
        subdeleted = [False]
        difftree = output.split('\x00')
        lcount = len(difftree)
        i = 0

        skipsubmodules = self.ui.configbool('convert', 'git.skipsubmodules')
        def add(entry, f, isdest):
            seen.add(f)
            h = entry[3]
            p = (entry[1] == "100755")
            s = (entry[1] == "120000")
            renamesource = (not isdest and entry[4][0] == 'R')

            if f == '.gitmodules':
                if skipsubmodules:
                    return

                subexists[0] = True
                if entry[4] == 'D' or renamesource:
                    subdeleted[0] = True
                    changes.append(('.hgsub', nodemod.nullhex))
                else:
                    changes.append(('.hgsub', ''))
            elif entry[1] == '160000' or entry[0] == ':160000':
                if not skipsubmodules:
                    subexists[0] = True
            else:
                if renamesource:
                    h = nodemod.nullhex
                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
                changes.append((f, h))

        while i < lcount:
            l = difftree[i]
            i += 1
            if not entry:
                if not l.startswith(':'):
                    continue
                entry = l.split()
                continue
            f = l
            if entry[4][0] == 'C':
                copysrc = f
                copydest = difftree[i]
                i += 1
                f = copydest
                copies[copydest] = copysrc
            if f not in seen:
                add(entry, f, False)
            # A file can be copied multiple times, or modified and copied
            # simultaneously. So f can be repeated even if fdest isn't.
            if entry[4][0] == 'R':
                # rename: next line is the destination
                fdest = difftree[i]
                i += 1
                if fdest not in seen:
                    add(entry, fdest, True)
                    # .gitmodules isn't imported at all, so it being copied to
                    # and fro doesn't really make sense
                    if f != '.gitmodules' and fdest != '.gitmodules':
                        copies[fdest] = f
            entry = None

        if subexists[0]:
            if subdeleted[0]:
                changes.append(('.hgsubstate', nodemod.nullhex))
            else:
                self.retrievegitmodules(version)
                changes.append(('.hgsubstate', ''))
        return (changes, copies, set())

    def getcommit(self, version):
        c = self.catfile(version, "commit") # read the commit hash
        end = c.find("\n\n")
        message = c[end + 2:]
        message = self.recode(message)
        l = c[:end].splitlines()
        parents = []
        author = committer = None
        extra = {}
        for e in l[1:]:
            n, v = e.split(" ", 1)
            if n == "author":
                p = v.split()
                tm, tz = p[-2:]
                author = " ".join(p[:-2])
                if author[0] == "<":
                    author = author[1:-1]
                author = self.recode(author)
            if n == "committer":
                p = v.split()
                tm, tz = p[-2:]
                committer = " ".join(p[:-2])
                if committer[0] == "<":
                    committer = committer[1:-1]
                committer = self.recode(committer)
            if n == "parent":
                parents.append(v)
            if n in self.copyextrakeys:
                extra[n] = v

        if self.committeractions['dropcommitter']:
            committer = None
        elif self.committeractions['replaceauthor']:
            author = committer

        if committer:
            messagealways = self.committeractions['messagealways']
            messagedifferent = self.committeractions['messagedifferent']
            if messagealways:
                message += '\n%s %s\n' % (messagealways, committer)
            elif messagedifferent and author != committer:
                message += '\n%s %s\n' % (messagedifferent, committer)

        tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
        tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
        date = tm + " " + str(tz)
        saverev = self.ui.configbool('convert', 'git.saverev')

        c = common.commit(parents=parents, date=date, author=author,
                          desc=message,
                          rev=version,
                          extra=extra,
                          saverev=saverev)
        return c

    def numcommits(self):
        output, ret = self.gitrunlines('rev-list', '--all')
        if ret:
            raise error.Abort(_('cannot retrieve number of commits in %s') \
                              % self.path)
        return len(output)

    def gettags(self):
        tags = {}
        alltags = {}
        output, status = self.gitrunlines('ls-remote', '--tags', self.path)

        if status:
            raise error.Abort(_('cannot read tags from %s') % self.path)
        prefix = 'refs/tags/'

        # Build complete list of tags, both annotated and bare ones
        for line in output:
            line = line.strip()
            if line.startswith("error:") or line.startswith("fatal:"):
                raise error.Abort(_('cannot read tags from %s') % self.path)
            node, tag = line.split(None, 1)
            if not tag.startswith(prefix):
                continue
            alltags[tag[len(prefix):]] = node

        # Filter out tag objects for annotated tag refs
        for tag in alltags:
            if tag.endswith('^{}'):
                tags[tag[:-3]] = alltags[tag]
            else:
                if tag + '^{}' in alltags:
                    continue
                else:
                    tags[tag] = alltags[tag]

        return tags

    def getchangedfiles(self, version, i):
        changes = []
        if i is None:
            output, status = self.gitrunlines('diff-tree', '--root', '-m',
                                              '-r', version)
            if status:
                raise error.Abort(_('cannot read changes in %s') % version)
            for l in output:
                if "\t" not in l:
                    continue
                m, f = l[:-1].split("\t")
                changes.append(f)
        else:
            output, status = self.gitrunlines('diff-tree', '--name-only',
                                              '--root', '-r', version,
-                                             '%s^%s' % (version, i + 1), '--')
+                                             '%s^%d' % (version, i + 1), '--')
            if status:
                raise error.Abort(_('cannot read changes in %s') % version)
            changes = [f.rstrip('\n') for f in output]

        return changes

    def getbookmarks(self):
        bookmarks = {}

        # Handle local and remote branches
        remoteprefix = self.ui.config('convert', 'git.remoteprefix')
        reftypes = [
            # (git prefix, hg prefix)
            ('refs/remotes/origin/', remoteprefix + '/'),
            ('refs/heads/', '')
        ]

        exclude = {
            'refs/remotes/origin/HEAD',
        }

        try:
            output, status = self.gitrunlines('show-ref')
            for line in output:
                line = line.strip()
                rev, name = line.split(None, 1)
                # Process each type of branch
                for gitprefix, hgprefix in reftypes:
                    if not name.startswith(gitprefix) or name in exclude:
                        continue
                    name = '%s%s' % (hgprefix, name[len(gitprefix):])
                    bookmarks[name] = rev
        except Exception:
            pass

        return bookmarks

    def checkrevformat(self, revstr, mapname='splicemap'):
        """ git revision string is a 40 byte hex """
        self.checkhexformat(revstr, mapname)
@@ -1,647 +1,647 @@
1 # hg.py - hg backend for convert extension
1 # hg.py - hg backend for convert extension
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # Notes for hg->hg conversion:
8 # Notes for hg->hg conversion:
9 #
9 #
10 # * Old versions of Mercurial didn't trim the whitespace from the ends
10 # * Old versions of Mercurial didn't trim the whitespace from the ends
11 # of commit messages, but new versions do. Changesets created by
11 # of commit messages, but new versions do. Changesets created by
12 # those older versions, then converted, may thus have different
12 # those older versions, then converted, may thus have different
13 # hashes for changesets that are otherwise identical.
13 # hashes for changesets that are otherwise identical.
14 #
14 #
15 # * Using "--config convert.hg.saverev=true" will make the source
15 # * Using "--config convert.hg.saverev=true" will make the source
16 # identifier to be stored in the converted revision. This will cause
16 # identifier to be stored in the converted revision. This will cause
17 # the converted revision to have a different identity than the
17 # the converted revision to have a different identity than the
18 # source.
18 # source.
19 from __future__ import absolute_import
19 from __future__ import absolute_import
20
20
21 import os
21 import os
22 import re
22 import re
23 import time
23 import time
24
24
25 from mercurial.i18n import _
25 from mercurial.i18n import _
26 from mercurial import (
26 from mercurial import (
27 bookmarks,
27 bookmarks,
28 context,
28 context,
29 error,
29 error,
30 exchange,
30 exchange,
31 hg,
31 hg,
32 lock as lockmod,
32 lock as lockmod,
33 merge as mergemod,
33 merge as mergemod,
34 node as nodemod,
34 node as nodemod,
35 phases,
35 phases,
36 scmutil,
36 scmutil,
37 util,
37 util,
38 )
38 )
39 from mercurial.utils import dateutil
39 from mercurial.utils import dateutil
40 stringio = util.stringio
40 stringio = util.stringio
41
41
42 from . import common
42 from . import common
43 mapfile = common.mapfile
43 mapfile = common.mapfile
44 NoRepo = common.NoRepo
44 NoRepo = common.NoRepo
45
45
46 sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')
46 sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')
47
47
48 class mercurial_sink(common.converter_sink):
48 class mercurial_sink(common.converter_sink):
49 def __init__(self, ui, repotype, path):
49 def __init__(self, ui, repotype, path):
50 common.converter_sink.__init__(self, ui, repotype, path)
50 common.converter_sink.__init__(self, ui, repotype, path)
51 self.branchnames = ui.configbool('convert', 'hg.usebranchnames')
51 self.branchnames = ui.configbool('convert', 'hg.usebranchnames')
52 self.clonebranches = ui.configbool('convert', 'hg.clonebranches')
52 self.clonebranches = ui.configbool('convert', 'hg.clonebranches')
53 self.tagsbranch = ui.config('convert', 'hg.tagsbranch')
53 self.tagsbranch = ui.config('convert', 'hg.tagsbranch')
54 self.lastbranch = None
54 self.lastbranch = None
55 if os.path.isdir(path) and len(os.listdir(path)) > 0:
55 if os.path.isdir(path) and len(os.listdir(path)) > 0:
56 try:
56 try:
57 self.repo = hg.repository(self.ui, path)
57 self.repo = hg.repository(self.ui, path)
58 if not self.repo.local():
58 if not self.repo.local():
59 raise NoRepo(_('%s is not a local Mercurial repository')
59 raise NoRepo(_('%s is not a local Mercurial repository')
60 % path)
60 % path)
61 except error.RepoError as err:
61 except error.RepoError as err:
62 ui.traceback()
62 ui.traceback()
63 raise NoRepo(err.args[0])
63 raise NoRepo(err.args[0])
64 else:
64 else:
65 try:
65 try:
66 ui.status(_('initializing destination %s repository\n') % path)
66 ui.status(_('initializing destination %s repository\n') % path)
67 self.repo = hg.repository(self.ui, path, create=True)
67 self.repo = hg.repository(self.ui, path, create=True)
68 if not self.repo.local():
68 if not self.repo.local():
69 raise NoRepo(_('%s is not a local Mercurial repository')
69 raise NoRepo(_('%s is not a local Mercurial repository')
70 % path)
70 % path)
71 self.created.append(path)
71 self.created.append(path)
72 except error.RepoError:
72 except error.RepoError:
73 ui.traceback()
73 ui.traceback()
74 raise NoRepo(_("could not create hg repository %s as sink")
74 raise NoRepo(_("could not create hg repository %s as sink")
75 % path)
75 % path)
76 self.lock = None
76 self.lock = None
77 self.wlock = None
77 self.wlock = None
78 self.filemapmode = False
78 self.filemapmode = False
79 self.subrevmaps = {}
79 self.subrevmaps = {}
80
80
81 def before(self):
81 def before(self):
82 self.ui.debug('run hg sink pre-conversion action\n')
82 self.ui.debug('run hg sink pre-conversion action\n')
83 self.wlock = self.repo.wlock()
83 self.wlock = self.repo.wlock()
84 self.lock = self.repo.lock()
84 self.lock = self.repo.lock()
85
85
86 def after(self):
86 def after(self):
87 self.ui.debug('run hg sink post-conversion action\n')
87 self.ui.debug('run hg sink post-conversion action\n')
88 if self.lock:
88 if self.lock:
89 self.lock.release()
89 self.lock.release()
90 if self.wlock:
90 if self.wlock:
91 self.wlock.release()
91 self.wlock.release()
92
92
93 def revmapfile(self):
93 def revmapfile(self):
94 return self.repo.vfs.join("shamap")
94 return self.repo.vfs.join("shamap")
95
95
96 def authorfile(self):
96 def authorfile(self):
97 return self.repo.vfs.join("authormap")
97 return self.repo.vfs.join("authormap")
98
98
99 def setbranch(self, branch, pbranches):
99 def setbranch(self, branch, pbranches):
100 if not self.clonebranches:
100 if not self.clonebranches:
101 return
101 return
102
102
103 setbranch = (branch != self.lastbranch)
103 setbranch = (branch != self.lastbranch)
104 self.lastbranch = branch
104 self.lastbranch = branch
105 if not branch:
105 if not branch:
106 branch = 'default'
106 branch = 'default'
107 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
107 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
108 if pbranches:
108 if pbranches:
109 pbranch = pbranches[0][1]
109 pbranch = pbranches[0][1]
110 else:
110 else:
111 pbranch = 'default'
111 pbranch = 'default'
112
112
113 branchpath = os.path.join(self.path, branch)
113 branchpath = os.path.join(self.path, branch)
114 if setbranch:
114 if setbranch:
115 self.after()
115 self.after()
116 try:
116 try:
117 self.repo = hg.repository(self.ui, branchpath)
117 self.repo = hg.repository(self.ui, branchpath)
118 except Exception:
118 except Exception:
119 self.repo = hg.repository(self.ui, branchpath, create=True)
119 self.repo = hg.repository(self.ui, branchpath, create=True)
120 self.before()
120 self.before()
121
121
122 # pbranches may bring revisions from other branches (merge parents)
122 # pbranches may bring revisions from other branches (merge parents)
123 # Make sure we have them, or pull them.
123 # Make sure we have them, or pull them.
124 missings = {}
124 missings = {}
125 for b in pbranches:
125 for b in pbranches:
126 try:
126 try:
127 self.repo.lookup(b[0])
127 self.repo.lookup(b[0])
128 except Exception:
128 except Exception:
129 missings.setdefault(b[1], []).append(b[0])
129 missings.setdefault(b[1], []).append(b[0])
130
130
131 if missings:
131 if missings:
132 self.after()
132 self.after()
133 for pbranch, heads in sorted(missings.iteritems()):
133 for pbranch, heads in sorted(missings.iteritems()):
134 pbranchpath = os.path.join(self.path, pbranch)
134 pbranchpath = os.path.join(self.path, pbranch)
135 prepo = hg.peer(self.ui, {}, pbranchpath)
135 prepo = hg.peer(self.ui, {}, pbranchpath)
136 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
136 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
137 exchange.pull(self.repo, prepo,
137 exchange.pull(self.repo, prepo,
138 [prepo.lookup(h) for h in heads])
138 [prepo.lookup(h) for h in heads])
139 self.before()
139 self.before()
140
140
141 def _rewritetags(self, source, revmap, data):
141 def _rewritetags(self, source, revmap, data):
142 fp = stringio()
142 fp = stringio()
143 for line in data.splitlines():
143 for line in data.splitlines():
144 s = line.split(' ', 1)
144 s = line.split(' ', 1)
145 if len(s) != 2:
145 if len(s) != 2:
146 continue
146 continue
147 revid = revmap.get(source.lookuprev(s[0]))
147 revid = revmap.get(source.lookuprev(s[0]))
148 if not revid:
148 if not revid:
149 if s[0] == nodemod.nullhex:
149 if s[0] == nodemod.nullhex:
150 revid = s[0]
150 revid = s[0]
151 else:
151 else:
152 continue
152 continue
153 fp.write('%s %s\n' % (revid, s[1]))
153 fp.write('%s %s\n' % (revid, s[1]))
154 return fp.getvalue()
154 return fp.getvalue()
155
155
156 def _rewritesubstate(self, source, data):
156 def _rewritesubstate(self, source, data):
157 fp = stringio()
157 fp = stringio()
158 for line in data.splitlines():
158 for line in data.splitlines():
159 s = line.split(' ', 1)
159 s = line.split(' ', 1)
160 if len(s) != 2:
160 if len(s) != 2:
161 continue
161 continue
162
162
163 revid = s[0]
163 revid = s[0]
164 subpath = s[1]
164 subpath = s[1]
165 if revid != nodemod.nullhex:
165 if revid != nodemod.nullhex:
166 revmap = self.subrevmaps.get(subpath)
166 revmap = self.subrevmaps.get(subpath)
167 if revmap is None:
167 if revmap is None:
168 revmap = mapfile(self.ui,
168 revmap = mapfile(self.ui,
169 self.repo.wjoin(subpath, '.hg/shamap'))
169 self.repo.wjoin(subpath, '.hg/shamap'))
170 self.subrevmaps[subpath] = revmap
170 self.subrevmaps[subpath] = revmap
171
171
172 # It is reasonable that one or more of the subrepos don't
172 # It is reasonable that one or more of the subrepos don't
173 # need to be converted, in which case they can be cloned
173 # need to be converted, in which case they can be cloned
174 # into place instead of converted. Therefore, only warn
174 # into place instead of converted. Therefore, only warn
175 # once.
175 # once.
176 msg = _('no ".hgsubstate" updates will be made for "%s"\n')
176 msg = _('no ".hgsubstate" updates will be made for "%s"\n')
177 if len(revmap) == 0:
177 if len(revmap) == 0:
178 sub = self.repo.wvfs.reljoin(subpath, '.hg')
178 sub = self.repo.wvfs.reljoin(subpath, '.hg')
179
179
180 if self.repo.wvfs.exists(sub):
180 if self.repo.wvfs.exists(sub):
181 self.ui.warn(msg % subpath)
181 self.ui.warn(msg % subpath)
182
182
183 newid = revmap.get(revid)
183 newid = revmap.get(revid)
184 if not newid:
184 if not newid:
185 if len(revmap) > 0:
185 if len(revmap) > 0:
186 self.ui.warn(_("%s is missing from %s/.hg/shamap\n") %
186 self.ui.warn(_("%s is missing from %s/.hg/shamap\n") %
187 (revid, subpath))
187 (revid, subpath))
188 else:
188 else:
189 revid = newid
189 revid = newid
190
190
191 fp.write('%s %s\n' % (revid, subpath))
191 fp.write('%s %s\n' % (revid, subpath))
192
192
193 return fp.getvalue()
193 return fp.getvalue()
194
194
195 def _calculatemergedfiles(self, source, p1ctx, p2ctx):
195 def _calculatemergedfiles(self, source, p1ctx, p2ctx):
196 """Calculates the files from p2 that we need to pull in when merging p1
196 """Calculates the files from p2 that we need to pull in when merging p1
197 and p2, given that the merge is coming from the given source.
197 and p2, given that the merge is coming from the given source.
198
198
199 This prevents us from losing files that only exist in the target p2 and
199 This prevents us from losing files that only exist in the target p2 and
200 that don't come from the source repo (like if you're merging multiple
200 that don't come from the source repo (like if you're merging multiple
201 repositories together).
201 repositories together).
202 """
202 """
203 anc = [p1ctx.ancestor(p2ctx)]
203 anc = [p1ctx.ancestor(p2ctx)]
204 # Calculate what files are coming from p2
204 # Calculate what files are coming from p2
205 actions, diverge, rename = mergemod.calculateupdates(
205 actions, diverge, rename = mergemod.calculateupdates(
206 self.repo, p1ctx, p2ctx, anc,
206 self.repo, p1ctx, p2ctx, anc,
207 True, # branchmerge
207 True, # branchmerge
208 True, # force
208 True, # force
209 False, # acceptremote
209 False, # acceptremote
210 False, # followcopies
210 False, # followcopies
211 )
211 )
212
212
213 for file, (action, info, msg) in actions.iteritems():
213 for file, (action, info, msg) in actions.iteritems():
214 if source.targetfilebelongstosource(file):
214 if source.targetfilebelongstosource(file):
215 # If the file belongs to the source repo, ignore the p2
215 # If the file belongs to the source repo, ignore the p2
216 # since it will be covered by the existing fileset.
216 # since it will be covered by the existing fileset.
217 continue
217 continue
218
218
219 # If the file requires actual merging, abort. We don't have enough
219 # If the file requires actual merging, abort. We don't have enough
220 # context to resolve merges correctly.
220 # context to resolve merges correctly.
221 if action in ['m', 'dm', 'cd', 'dc']:
221 if action in ['m', 'dm', 'cd', 'dc']:
222 raise error.Abort(_("unable to convert merge commit "
222 raise error.Abort(_("unable to convert merge commit "
223 "since target parents do not merge cleanly (file "
223 "since target parents do not merge cleanly (file "
224 "%s, parents %s and %s)") % (file, p1ctx,
224 "%s, parents %s and %s)") % (file, p1ctx,
225 p2ctx))
225 p2ctx))
226 elif action == 'k':
226 elif action == 'k':
227 # 'keep' means nothing changed from p1
227 # 'keep' means nothing changed from p1
228 continue
228 continue
229 else:
229 else:
230 # Any other change means we want to take the p2 version
230 # Any other change means we want to take the p2 version
231 yield file
231 yield file
232
232
233 def putcommit(self, files, copies, parents, commit, source, revmap, full,
233 def putcommit(self, files, copies, parents, commit, source, revmap, full,
234 cleanp2):
234 cleanp2):
235 files = dict(files)
235 files = dict(files)
236
236
237 def getfilectx(repo, memctx, f):
237 def getfilectx(repo, memctx, f):
238 if p2ctx and f in p2files and f not in copies:
238 if p2ctx and f in p2files and f not in copies:
239 self.ui.debug('reusing %s from p2\n' % f)
239 self.ui.debug('reusing %s from p2\n' % f)
240 try:
240 try:
241 return p2ctx[f]
241 return p2ctx[f]
242 except error.ManifestLookupError:
242 except error.ManifestLookupError:
243 # If the file doesn't exist in p2, then we're syncing a
243 # If the file doesn't exist in p2, then we're syncing a
244 # delete, so just return None.
244 # delete, so just return None.
245 return None
245 return None
246 try:
246 try:
247 v = files[f]
247 v = files[f]
248 except KeyError:
248 except KeyError:
249 return None
249 return None
250 data, mode = source.getfile(f, v)
250 data, mode = source.getfile(f, v)
251 if data is None:
251 if data is None:
252 return None
252 return None
253 if f == '.hgtags':
253 if f == '.hgtags':
254 data = self._rewritetags(source, revmap, data)
254 data = self._rewritetags(source, revmap, data)
255 if f == '.hgsubstate':
255 if f == '.hgsubstate':
256 data = self._rewritesubstate(source, data)
256 data = self._rewritesubstate(source, data)
257 return context.memfilectx(self.repo, memctx, f, data, 'l' in mode,
257 return context.memfilectx(self.repo, memctx, f, data, 'l' in mode,
258 'x' in mode, copies.get(f))
258 'x' in mode, copies.get(f))
259
259
260 pl = []
260 pl = []
261 for p in parents:
261 for p in parents:
262 if p not in pl:
262 if p not in pl:
263 pl.append(p)
263 pl.append(p)
264 parents = pl
264 parents = pl
265 nparents = len(parents)
265 nparents = len(parents)
266 if self.filemapmode and nparents == 1:
266 if self.filemapmode and nparents == 1:
267 m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0]
267 m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0]
268 parent = parents[0]
268 parent = parents[0]
269
269
270 if len(parents) < 2:
270 if len(parents) < 2:
271 parents.append(nodemod.nullid)
271 parents.append(nodemod.nullid)
272 if len(parents) < 2:
272 if len(parents) < 2:
273 parents.append(nodemod.nullid)
273 parents.append(nodemod.nullid)
274 p2 = parents.pop(0)
274 p2 = parents.pop(0)
275
275
276 text = commit.desc
276 text = commit.desc
277
277
278 sha1s = re.findall(sha1re, text)
278 sha1s = re.findall(sha1re, text)
279 for sha1 in sha1s:
279 for sha1 in sha1s:
280 oldrev = source.lookuprev(sha1)
280 oldrev = source.lookuprev(sha1)
281 newrev = revmap.get(oldrev)
281 newrev = revmap.get(oldrev)
282 if newrev is not None:
282 if newrev is not None:
283 text = text.replace(sha1, newrev[:len(sha1)])
283 text = text.replace(sha1, newrev[:len(sha1)])
284
284
285 extra = commit.extra.copy()
285 extra = commit.extra.copy()
286
286
287 sourcename = self.repo.ui.config('convert', 'hg.sourcename')
287 sourcename = self.repo.ui.config('convert', 'hg.sourcename')
288 if sourcename:
288 if sourcename:
289 extra['convert_source'] = sourcename
289 extra['convert_source'] = sourcename
290
290
291 for label in ('source', 'transplant_source', 'rebase_source',
291 for label in ('source', 'transplant_source', 'rebase_source',
292 'intermediate-source'):
292 'intermediate-source'):
293 node = extra.get(label)
293 node = extra.get(label)
294
294
295 if node is None:
295 if node is None:
296 continue
296 continue
297
297
298 # Only transplant stores its reference in binary
298 # Only transplant stores its reference in binary
299 if label == 'transplant_source':
299 if label == 'transplant_source':
300 node = nodemod.hex(node)
300 node = nodemod.hex(node)
301
301
302 newrev = revmap.get(node)
302 newrev = revmap.get(node)
303 if newrev is not None:
303 if newrev is not None:
304 if label == 'transplant_source':
304 if label == 'transplant_source':
305 newrev = nodemod.bin(newrev)
305 newrev = nodemod.bin(newrev)
306
306
307 extra[label] = newrev
307 extra[label] = newrev
308
308
309 if self.branchnames and commit.branch:
309 if self.branchnames and commit.branch:
310 extra['branch'] = commit.branch
310 extra['branch'] = commit.branch
311 if commit.rev and commit.saverev:
311 if commit.rev and commit.saverev:
312 extra['convert_revision'] = commit.rev
312 extra['convert_revision'] = commit.rev
313
313
314 while parents:
314 while parents:
315 p1 = p2
315 p1 = p2
316 p2 = parents.pop(0)
316 p2 = parents.pop(0)
317 p1ctx = self.repo[p1]
317 p1ctx = self.repo[p1]
318 p2ctx = None
318 p2ctx = None
319 if p2 != nodemod.nullid:
319 if p2 != nodemod.nullid:
320 p2ctx = self.repo[p2]
320 p2ctx = self.repo[p2]
321 fileset = set(files)
321 fileset = set(files)
322 if full:
322 if full:
323 fileset.update(self.repo[p1])
323 fileset.update(self.repo[p1])
324 fileset.update(self.repo[p2])
324 fileset.update(self.repo[p2])
325
325
326 if p2ctx:
326 if p2ctx:
327 p2files = set(cleanp2)
327 p2files = set(cleanp2)
328 for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
328 for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
329 p2files.add(file)
329 p2files.add(file)
330 fileset.add(file)
330 fileset.add(file)
331
331
332 ctx = context.memctx(self.repo, (p1, p2), text, fileset,
332 ctx = context.memctx(self.repo, (p1, p2), text, fileset,
333 getfilectx, commit.author, commit.date, extra)
333 getfilectx, commit.author, commit.date, extra)
334
334
335 # We won't know if the conversion changes the node until after the
335 # We won't know if the conversion changes the node until after the
336 # commit, so copy the source's phase for now.
336 # commit, so copy the source's phase for now.
337 self.repo.ui.setconfig('phases', 'new-commit',
337 self.repo.ui.setconfig('phases', 'new-commit',
338 phases.phasenames[commit.phase], 'convert')
338 phases.phasenames[commit.phase], 'convert')
339
339
340 with self.repo.transaction("convert") as tr:
340 with self.repo.transaction("convert") as tr:
341 node = nodemod.hex(self.repo.commitctx(ctx))
341 node = nodemod.hex(self.repo.commitctx(ctx))
342
342
343 # If the node value has changed, but the phase is lower than
343 # If the node value has changed, but the phase is lower than
344 # draft, set it back to draft since it hasn't been exposed
344 # draft, set it back to draft since it hasn't been exposed
345 # anywhere.
345 # anywhere.
346 if commit.rev != node:
346 if commit.rev != node:
347 ctx = self.repo[node]
347 ctx = self.repo[node]
348 if ctx.phase() < phases.draft:
348 if ctx.phase() < phases.draft:
349 phases.registernew(self.repo, tr, phases.draft,
349 phases.registernew(self.repo, tr, phases.draft,
350 [ctx.node()])
350 [ctx.node()])
351
351
352 text = "(octopus merge fixup)\n"
352 text = "(octopus merge fixup)\n"
353 p2 = node
353 p2 = node
354
354
355 if self.filemapmode and nparents == 1:
355 if self.filemapmode and nparents == 1:
356 man = self.repo.manifestlog._revlog
356 man = self.repo.manifestlog._revlog
357 mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
357 mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
358 closed = 'close' in commit.extra
358 closed = 'close' in commit.extra
359 if not closed and not man.cmp(m1node, man.revision(mnode)):
359 if not closed and not man.cmp(m1node, man.revision(mnode)):
360 self.ui.status(_("filtering out empty revision\n"))
360 self.ui.status(_("filtering out empty revision\n"))
361 self.repo.rollback(force=True)
361 self.repo.rollback(force=True)
362 return parent
362 return parent
363 return p2
363 return p2
364
364
365 def puttags(self, tags):
365 def puttags(self, tags):
366 tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
366 tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
367 tagparent = tagparent or nodemod.nullid
367 tagparent = tagparent or nodemod.nullid
368
368
369 oldlines = set()
369 oldlines = set()
370 for branch, heads in self.repo.branchmap().iteritems():
370 for branch, heads in self.repo.branchmap().iteritems():
371 for h in heads:
371 for h in heads:
372 if '.hgtags' in self.repo[h]:
372 if '.hgtags' in self.repo[h]:
373 oldlines.update(
373 oldlines.update(
374 set(self.repo[h]['.hgtags'].data().splitlines(True)))
374 set(self.repo[h]['.hgtags'].data().splitlines(True)))
375 oldlines = sorted(list(oldlines))
375 oldlines = sorted(list(oldlines))
376
376
377 newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
377 newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
378 if newlines == oldlines:
378 if newlines == oldlines:
379 return None, None
379 return None, None
380
380
381 # if the old and new tags match, then there is nothing to update
381 # if the old and new tags match, then there is nothing to update
382 oldtags = set()
382 oldtags = set()
383 newtags = set()
383 newtags = set()
384 for line in oldlines:
384 for line in oldlines:
385 s = line.strip().split(' ', 1)
385 s = line.strip().split(' ', 1)
386 if len(s) != 2:
386 if len(s) != 2:
387 continue
387 continue
388 oldtags.add(s[1])
388 oldtags.add(s[1])
389 for line in newlines:
389 for line in newlines:
390 s = line.strip().split(' ', 1)
390 s = line.strip().split(' ', 1)
391 if len(s) != 2:
391 if len(s) != 2:
392 continue
392 continue
393 if s[1] not in oldtags:
393 if s[1] not in oldtags:
394 newtags.add(s[1].strip())
394 newtags.add(s[1].strip())
395
395
396 if not newtags:
396 if not newtags:
397 return None, None
397 return None, None
398
398
399 data = "".join(newlines)
399 data = "".join(newlines)
400 def getfilectx(repo, memctx, f):
400 def getfilectx(repo, memctx, f):
401 return context.memfilectx(repo, memctx, f, data, False, False, None)
401 return context.memfilectx(repo, memctx, f, data, False, False, None)
402
402
403 self.ui.status(_("updating tags\n"))
403 self.ui.status(_("updating tags\n"))
404 date = "%s 0" % int(time.mktime(time.gmtime()))
404 date = "%d 0" % int(time.mktime(time.gmtime()))
405 extra = {'branch': self.tagsbranch}
405 extra = {'branch': self.tagsbranch}
406 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
406 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
407 [".hgtags"], getfilectx, "convert-repo", date,
407 [".hgtags"], getfilectx, "convert-repo", date,
408 extra)
408 extra)
409 node = self.repo.commitctx(ctx)
409 node = self.repo.commitctx(ctx)
410 return nodemod.hex(node), nodemod.hex(tagparent)
410 return nodemod.hex(node), nodemod.hex(tagparent)
411
411
412 def setfilemapmode(self, active):
412 def setfilemapmode(self, active):
413 self.filemapmode = active
413 self.filemapmode = active
414
414
415 def putbookmarks(self, updatedbookmark):
415 def putbookmarks(self, updatedbookmark):
416 if not len(updatedbookmark):
416 if not len(updatedbookmark):
417 return
417 return
418 wlock = lock = tr = None
418 wlock = lock = tr = None
419 try:
419 try:
420 wlock = self.repo.wlock()
420 wlock = self.repo.wlock()
421 lock = self.repo.lock()
421 lock = self.repo.lock()
422 tr = self.repo.transaction('bookmark')
422 tr = self.repo.transaction('bookmark')
423 self.ui.status(_("updating bookmarks\n"))
423 self.ui.status(_("updating bookmarks\n"))
424 destmarks = self.repo._bookmarks
424 destmarks = self.repo._bookmarks
425 changes = [(bookmark, nodemod.bin(updatedbookmark[bookmark]))
425 changes = [(bookmark, nodemod.bin(updatedbookmark[bookmark]))
426 for bookmark in updatedbookmark]
426 for bookmark in updatedbookmark]
427 destmarks.applychanges(self.repo, tr, changes)
427 destmarks.applychanges(self.repo, tr, changes)
428 tr.close()
428 tr.close()
429 finally:
429 finally:
430 lockmod.release(lock, wlock, tr)
430 lockmod.release(lock, wlock, tr)
431
431
432 def hascommitfrommap(self, rev):
432 def hascommitfrommap(self, rev):
433 # the exact semantics of clonebranches is unclear so we can't say no
433 # the exact semantics of clonebranches is unclear so we can't say no
434 return rev in self.repo or self.clonebranches
434 return rev in self.repo or self.clonebranches
435
435
436 def hascommitforsplicemap(self, rev):
436 def hascommitforsplicemap(self, rev):
437 if rev not in self.repo and self.clonebranches:
437 if rev not in self.repo and self.clonebranches:
438 raise error.Abort(_('revision %s not found in destination '
438 raise error.Abort(_('revision %s not found in destination '
439 'repository (lookups with clonebranches=true '
439 'repository (lookups with clonebranches=true '
440 'are not implemented)') % rev)
440 'are not implemented)') % rev)
441 return rev in self.repo
441 return rev in self.repo
442
442
class mercurial_source(common.converter_source):
    def __init__(self, ui, repotype, path, revs=None):
        common.converter_source.__init__(self, ui, repotype, path, revs)
        self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors')
        self.ignored = set()
        self.saverev = ui.configbool('convert', 'hg.saverev')
        try:
            self.repo = hg.repository(self.ui, path)
            # try to provoke an exception if this isn't really a hg
            # repo, but some other bogus compatible-looking url
            if not self.repo.local():
                raise error.RepoError
        except error.RepoError:
            ui.traceback()
            raise NoRepo(_("%s is not a local Mercurial repository") % path)
        self.lastrev = None
        self.lastctx = None
        self._changescache = None, None
        self.convertfp = None
        # Restrict converted revisions to startrev descendants
        startnode = ui.config('convert', 'hg.startrev')
        hgrevs = ui.config('convert', 'hg.revs')
        if hgrevs is None:
            if startnode is not None:
                try:
                    startnode = self.repo.lookup(startnode)
                except error.RepoError:
                    raise error.Abort(_('%s is not a valid start revision')
                                      % startnode)
                startrev = self.repo.changelog.rev(startnode)
                children = {startnode: 1}
                for r in self.repo.changelog.descendants([startrev]):
                    children[self.repo.changelog.node(r)] = 1
                self.keep = children.__contains__
            else:
                self.keep = util.always
            if revs:
                self._heads = [self.repo.lookup(r) for r in revs]
            else:
                self._heads = self.repo.heads()
        else:
            if revs or startnode is not None:
                raise error.Abort(_('hg.revs cannot be combined with '
                                    'hg.startrev or --rev'))
            nodes = set()
            parents = set()
            for r in scmutil.revrange(self.repo, [hgrevs]):
                ctx = self.repo[r]
                nodes.add(ctx.node())
                parents.update(p.node() for p in ctx.parents())
            self.keep = nodes.__contains__
            self._heads = nodes - parents

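    # A sketch of the three selection modes above (the revision names are
    # assumed examples, not defaults): with hg.startrev=1.0 only that revision
    # and its descendants pass self.keep; with hg.revs='1.0::tip' self.keep
    # accepts exactly that set and self._heads becomes its topological heads;
    # with neither set, every revision is kept and the repository heads are
    # used.
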
    def _changectx(self, rev):
        if self.lastrev != rev:
            self.lastctx = self.repo[rev]
            self.lastrev = rev
        return self.lastctx

    def _parents(self, ctx):
        return [p for p in ctx.parents() if p and self.keep(p.node())]

    def getheads(self):
        return [nodemod.hex(h) for h in self._heads if self.keep(h)]

    def getfile(self, name, rev):
        try:
            fctx = self._changectx(rev)[name]
            return fctx.data(), fctx.flags()
        except error.LookupError:
            return None, None

    def _changedfiles(self, ctx1, ctx2):
        ma, r = [], []
        maappend = ma.append
        rappend = r.append
        d = ctx1.manifest().diff(ctx2.manifest())
        for f, ((node1, flag1), (node2, flag2)) in d.iteritems():
            if node2 is None:
                rappend(f)
            else:
                maappend(f)
        return ma, r

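    # Illustration of the manifest diff consumed above (an assumed example):
    # a file removed in ctx2 shows up as 'somefile': ((node1, ''), (None, '')),
    # so node2 is None and the file lands in the removed list 'r'; added or
    # modified files have a non-None node2 and land in 'ma'.
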
    def getchanges(self, rev, full):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if full or not parents:
            files = copyfiles = ctx.manifest()
        if parents:
            if self._changescache[0] == rev:
                ma, r = self._changescache[1]
            else:
                ma, r = self._changedfiles(parents[0], ctx)
            if not full:
                files = ma + r
                copyfiles = ma
        # _getcopies() is also run for roots and before filtering so missing
        # revlogs are detected early
        copies = self._getcopies(ctx, parents, copyfiles)
        cleanp2 = set()
        if len(parents) == 2:
            d = parents[1].manifest().diff(ctx.manifest(), clean=True)
            for f, value in d.iteritems():
                if value is None:
                    cleanp2.add(f)
        changes = [(f, rev) for f in files if f not in self.ignored]
        changes.sort()
        return changes, copies, cleanp2

    def _getcopies(self, ctx, parents, files):
        copies = {}
        for name in files:
            if name in self.ignored:
                continue
            try:
                copysource, _copynode = ctx.filectx(name).renamed()
                if copysource in self.ignored:
                    continue
                # Ignore copy sources not in parent revisions
                if not any(copysource in p for p in parents):
                    continue
                copies[name] = copysource
            except TypeError:
                # renamed() returned False: the file was not renamed
                pass
            except error.LookupError as e:
                if not self.ignoreerrors:
                    raise
                self.ignored.add(name)
                self.ui.warn(_('ignoring: %s\n') % e)
        return copies

    def getcommit(self, rev):
        ctx = self._changectx(rev)
        _parents = self._parents(ctx)
        parents = [p.hex() for p in _parents]
        optparents = [p.hex() for p in ctx.parents() if p and p not in _parents]
        crev = rev

        return common.commit(author=ctx.user(),
                             date=dateutil.datestr(ctx.date(),
                                                   '%Y-%m-%d %H:%M:%S %1%2'),
                             desc=ctx.description(),
                             rev=crev,
                             parents=parents,
                             optparents=optparents,
                             branch=ctx.branch(),
                             extra=ctx.extra(),
                             sortkey=ctx.rev(),
                             saverev=self.saverev,
                             phase=ctx.phase())

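    # Note on the date format above: '%1%2' expands to the UTC offset, so a
    # converted date renders like '2018-04-06 12:30:00 +0200' (the concrete
    # values are only an assumed example).
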
    def gettags(self):
        # This will get written to .hgtags; filter non-global tags out.
        tags = [t for t in self.repo.tagslist()
                if self.repo.tagtype(t[0]) == 'global']
        return dict([(name, nodemod.hex(node)) for name, node in tags
                     if self.keep(node)])

    def getchangedfiles(self, rev, i):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if not parents and i is None:
            i = 0
            ma, r = ctx.manifest().keys(), []
        else:
            i = i or 0
            ma, r = self._changedfiles(parents[i], ctx)
        ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)]

        if i == 0:
            self._changescache = (rev, (ma, r))

        return ma + r

    def converted(self, rev, destrev):
        if self.convertfp is None:
            self.convertfp = open(self.repo.vfs.join('shamap'), 'ab')
        self.convertfp.write(util.tonativeeol('%s %s\n' % (destrev, rev)))
        self.convertfp.flush()

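    # Each shamap line written above pairs a destination hash with its source
    # hash, e.g. (hashes shortened, purely illustrative):
    #   1f0dee641bb7... 24f1031ad244...
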
    def before(self):
        self.ui.debug('run hg source pre-conversion action\n')

    def after(self):
        self.ui.debug('run hg source post-conversion action\n')

    def hasnativeorder(self):
        return True

    def hasnativeclose(self):
        return True

    def lookuprev(self, rev):
        try:
            return nodemod.hex(self.repo.lookup(rev))
        except (error.RepoError, error.LookupError):
            return None

    def getbookmarks(self):
        return bookmarks.listbookmarks(self.repo)

    def checkrevformat(self, revstr, mapname='splicemap'):
        """Mercurial revision strings are 40-byte hex hashes"""
        self.checkhexformat(revstr, mapname)
@@ -1,1186 +1,1186 b''
# Infinite push
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
""" store some pushes in a remote blob store on the server (EXPERIMENTAL)

    [infinitepush]
    # Server-side and client-side option. Pattern of the infinitepush bookmark
    branchpattern = PATTERN

    # Server or client
    server = False

    # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
    indextype = disk

    # Server-side option. Used only if indextype=sql.
    # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
    sqlhost = IP:PORT:DB_NAME:USER:PASSWORD

    # Server-side option. Used only if indextype=disk.
    # Filesystem path to the index store
    indexpath = PATH

    # Server-side option. Possible values: 'disk' or 'external'
    # Fails if not set
    storetype = disk

    # Server-side option.
    # Path to the binary that will save bundles to the bundlestore
    # Formatted cmd line will be passed to it (see `put_args`)
    put_binary = put

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for put binary. Placeholder: {filename}
    put_args = {filename}

    # Server-side option.
    # Path to the binary that gets bundles from the bundlestore.
    # Formatted cmd line will be passed to it (see `get_args`)
    get_binary = get

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for get binary. Placeholders: {filename} {handle}
    get_args = {filename} {handle}

    # Server-side option
    logfile = FILE

    # Server-side option
    loglevel = DEBUG

    # Server-side option. Used only if indextype=sql.
    # Sets mysql wait_timeout option.
    waittimeout = 300

    # Server-side option. Used only if indextype=sql.
    # Sets mysql innodb_lock_wait_timeout option.
    locktimeout = 120

    # Server-side option. Used only if indextype=sql.
    # Name of the repository
    reponame = ''

    # Client-side option. Used by --list-remote option. List of remote scratch
    # patterns to list if no patterns are specified.
    defaultremotepatterns = ['*']

    # Instructs infinitepush to forward all received bundle2 parts to the
    # bundle for storage. Defaults to False.
    storeallparts = True

    # Routes each incoming push to the bundlestore. Defaults to False.
    pushtobundlestore = True

    [remotenames]
    # Client-side option
    # This option should be set only if remotenames extension is enabled.
    # Whether remote bookmarks are tracked by remotenames extension.
    bookmarks = True
"""

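# A minimal end-to-end sketch (the pattern and bookmark name below are assumed
# examples, not defaults): a server enables the extension and sets
# infinitepush.server=True, indextype=disk and storetype=disk; a client sets
# infinitepush.branchpattern='re:scratch/.+', after which
# 'hg push -B scratch/myfeature' stores the commits in the bundlestore instead
# of the main repository, and 'hg push --bundle-store' forces that behaviour
# for any push.
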
from __future__ import absolute_import

import collections
import contextlib
import errno
import functools
import logging
import os
import random
import re
import socket
import subprocess
import tempfile
import time

from mercurial.node import (
    bin,
    hex,
)

from mercurial.i18n import _

from mercurial.utils import (
    procutil,
    stringutil,
)

from mercurial import (
    bundle2,
    changegroup,
    commands,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    peer,
    phases,
    pushkey,
    pycompat,
    registrar,
    util,
    wireproto,
)

from . import (
    bundleparts,
    common,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem('infinitepush', 'server',
    default=False,
)
configitem('infinitepush', 'storetype',
    default='',
)
configitem('infinitepush', 'indextype',
    default='',
)
configitem('infinitepush', 'indexpath',
    default='',
)
configitem('infinitepush', 'storeallparts',
    default=False,
)
configitem('infinitepush', 'reponame',
    default='',
)
configitem('scratchbranch', 'storepath',
    default='',
)
configitem('infinitepush', 'branchpattern',
    default='',
)
configitem('infinitepush', 'pushtobundlestore',
    default=False,
)
configitem('experimental', 'server-bundlestore-bookmark',
    default='',
)
configitem('experimental', 'infinitepush-scratchpush',
    default=False,
)

experimental = 'experimental'
configbookmark = 'server-bundlestore-bookmark'
configscratchpush = 'infinitepush-scratchpush'

scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
_scratchbranchmatcher = lambda x: False
_maybehash = re.compile(r'^[a-f0-9]+$').search

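# Sketch of how the matcher above gets populated (the pattern is an assumed
# example): commonsetup() below feeds infinitepush.branchpattern through
# stringutil.stringmatcher, so with branchpattern = 're:scratch/.+' the call
# _scratchbranchmatcher('scratch/foo') returns True while ordinary bookmark
# names fall through to the normal push path.
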
def _buildexternalbundlestore(ui):
    put_args = ui.configlist('infinitepush', 'put_args', [])
    put_binary = ui.config('infinitepush', 'put_binary')
    if not put_binary:
        raise error.Abort('put binary is not specified')
    get_args = ui.configlist('infinitepush', 'get_args', [])
    get_binary = ui.config('infinitepush', 'get_binary')
    if not get_binary:
        raise error.Abort('get binary is not specified')
    from . import store
    return store.externalbundlestore(put_binary, put_args, get_binary, get_args)

def _buildsqlindex(ui):
    sqlhost = ui.config('infinitepush', 'sqlhost')
    if not sqlhost:
        raise error.Abort(_('please set infinitepush.sqlhost'))
    host, port, db, user, password = sqlhost.split(':')
    reponame = ui.config('infinitepush', 'reponame')
    if not reponame:
        raise error.Abort(_('please set infinitepush.reponame'))

    logfile = ui.config('infinitepush', 'logfile', '')
    waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
    locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
    from . import sqlindexapi
    return sqlindexapi.sqlindexapi(
        reponame, host, port, db, user, password,
        logfile, _getloglevel(ui), waittimeout=waittimeout,
        locktimeout=locktimeout)

def _getloglevel(ui):
    loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
    numeric_loglevel = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_loglevel, int):
        raise error.Abort(_('invalid log level %s') % loglevel)
    return numeric_loglevel

def _tryhoist(ui, remotebookmark):
    '''returns a bookmark with the hoisted part removed

    The remotenames extension has a 'hoist' config that allows using remote
    bookmarks without specifying the remote path. For example, 'hg update
    master' works as well as 'hg update remote/master'. We want to allow the
    same in infinitepush.
    '''

    if common.isremotebooksenabled(ui):
        hoist = ui.config('remotenames', 'hoistedpeer') + '/'
        if remotebookmark.startswith(hoist):
            return remotebookmark[len(hoist):]
    return remotebookmark

class bundlestore(object):
    def __init__(self, repo):
        self._repo = repo
        storetype = self._repo.ui.config('infinitepush', 'storetype')
        if storetype == 'disk':
            from . import store
            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == 'external':
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush store type specified %s') % storetype)

        indextype = self._repo.ui.config('infinitepush', 'indextype')
        if indextype == 'disk':
            from . import fileindexapi
            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == 'sql':
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush index type specified %s') % indextype)

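# Roughly: the store holds the opaque bundle blobs, while the index maps
# bookmark names and node hashes to those blobs. The two are configured
# independently, which is why e.g. indextype=sql can be combined with
# storetype=external.
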
def _isserver(ui):
    return ui.configbool('infinitepush', 'server')

def reposetup(ui, repo):
    if _isserver(ui) and repo.local():
        repo.bundlestore = bundlestore(repo)

def extsetup(ui):
    commonsetup(ui)
    if _isserver(ui):
        serverextsetup(ui)
    else:
        clientextsetup(ui)

def commonsetup(ui):
    wireproto.commands['listkeyspatterns'] = (
        wireprotolistkeyspatterns, 'namespace patterns')
    scratchbranchpat = ui.config('infinitepush', 'branchpattern')
    if scratchbranchpat:
        global _scratchbranchmatcher
        kind, pat, _scratchbranchmatcher = \
                stringutil.stringmatcher(scratchbranchpat)

def serverextsetup(ui):
    origpushkeyhandler = bundle2.parthandlermapping['pushkey']

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)
    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping['pushkey'] = newpushkeyhandler

    orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: \
        bundle2handlephases(orighandlephasehandler, *args, **kwargs)
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler

    extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                            localrepolistkeys)
    wireproto.commands['lookup'] = (
        _lookupwrap(wireproto.commands['lookup'][0]), 'key')
    extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, 'processparts', processparts)

def clientextsetup(ui):
    entry = extensions.wrapcommand(commands.table, 'push', _push)

    entry[1].append(
        ('', 'bundle-store', None,
         _('force push to go to bundle store (EXPERIMENTAL)')))

    extensions.wrapcommand(commands.table, 'pull', _pull)

    extensions.wrapfunction(discovery, 'checkheads', _checkheads)

    wireproto.wirepeer.listkeyspatterns = listkeyspatterns

    partorder = exchange.b2partsgenorder
    index = partorder.index('changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype)))

def _checkheads(orig, pushop):
    if pushop.ui.configbool(experimental, configscratchpush, False):
        return
    return orig(pushop)

def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    patterns = wireproto.decodelist(patterns)
    d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
    return pushkey.encodekeys(d)

def localrepolistkeys(orig, self, namespace, patterns=None):
    if namespace == 'bookmarks' and patterns:
        index = self.bundlestore.index
        results = {}
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith('*'):
                pattern = 're:^' + pattern[:-1] + '.*'
            kind, pat, matcher = stringutil.stringmatcher(pattern)
            for bookmark, node in bookmarks.iteritems():
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)

@peer.batchable
def listkeyspatterns(self, namespace, patterns):
    if not self.capable('pushkey'):
        yield {}, None
    f = peer.future()
    self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
                  (namespace, patterns))
    yield {
        'namespace': encoding.fromlocal(namespace),
        'patterns': wireproto.encodelist(patterns)
    }, f
    d = f.value
    self.ui.debug('received listkey for "%s": %i bytes\n'
                  % (namespace, len(d)))
    yield pushkey.decodekeys(d)

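# Usage sketch for the batchable method above (the pattern is an assumed
# example):
#   remote.listkeyspatterns('bookmarks', patterns=['scratch/foo*'])
# asks the server for only the bookmarks matching the patterns instead of
# transferring the full listkeys map.
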
def _readbundlerevs(bundlerepo):
    return list(bundlerepo.revs('bundle()'))

def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
    '''Tells remotefilelog to include all changed files in the changegroup

    By default remotefilelog doesn't include file content in the changegroup.
    But we need to include it if we are fetching from the bundlestore.
    '''
    changedfiles = set()
    cl = bundlerepo.changelog
    for r in bundlerevs:
        # [3] means changed files
        changedfiles.update(cl.read(r)[3])
    if not changedfiles:
        return bundlecaps

    changedfiles = '\0'.join(changedfiles)
    newcaps = []
    appended = False
    for cap in (bundlecaps or []):
        if cap.startswith('excludepattern='):
            newcaps.append('\0'.join((cap, changedfiles)))
            appended = True
        else:
            newcaps.append(cap)
    if not appended:
        # no excludepattern cap found. Just append it
        newcaps.append('excludepattern=' + changedfiles)

    return newcaps

def _rebundle(bundlerepo, bundleroots, unknownhead):
    '''
    A bundle may include more revisions than the user requested, e.g. when a
    single revision is asked for but the bundle also contains its descendants.
    This function filters out all revisions the user did not request.
    '''
    parts = []

    version = '02'
    outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
                                  missingheads=[unknownhead])
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart('changegroup', data=cgstream)
    cgpart.addparam('version', version)
    parts.append(cgpart)

    return parts

def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
    cl = bundlerepo.changelog
    bundleroots = []
    for rev in bundlerevs:
        node = cl.node(rev)
        parents = cl.parents(node)
        for parent in parents:
            # include all revs that exist in the main repo
            # to make sure that bundle may apply client-side
            if parent in oldrepo:
                bundleroots.append(parent)
    return bundleroots

def _needsrebundling(head, bundlerepo):
    bundleheads = list(bundlerepo.revs('heads(bundle())'))
    return not (len(bundleheads) == 1 and
                bundlerepo[bundleheads[0]].node() == head)

def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''generates the bundle that will be sent to the user

    returns a tuple with the raw bundle string and the bundle type
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart('changegroup',
                                          data=unbundler._stream.read())
                part.addparam('version', '01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == 'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        'unexpected bundle without changegroup part, ' +
                        'head: %s' % hex(head),
                        hint='report to administrator')
            else:
                raise error.Abort('unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts

def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if head not in repo.changelog.nodemap:
                if head not in nodestobundle:
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui)
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (bundlerepo, bundleroots,
                                               newbundlefile)

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head]))
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't clean up the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
    try:
        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add the non-scratch part
            # and only then add parts with scratch bundles, because the
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            origvalues = orig(self, namespace)
            if namespace == 'phases' and pullfrombundlestore:
                if origvalues.get('publishing') == 'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues['publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                                _listkeys)
        wrappedlistkeys = True
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(repo, source, heads=heads,
                      bundlecaps=bundlecaps, **kwargs)
    finally:
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
                                      _listkeys)
    return result

def _lookupwrap(orig):
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return "%s %s\n" % (1, scratchnode)
            else:
                return "%s %s\n" % (0, 'scratch branch %s not found' % localkey)
        else:
            try:
                r = hex(repo.lookup(localkey))
                return "%s %s\n" % (1, r)
            except Exception as inst:
                if repo.bundlestore.index.getbundle(localkey):
                    return "%s %s\n" % (1, localkey)
                else:
                    r = str(inst)
                    return "%s %s\n" % (0, r)
    return _lookup

def _pull(orig, ui, repo, source="default", **opts):
    opts = pycompat.byteskwargs(opts)
    # Copy-paste from `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get('rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get('bookmark'):
        bookmarks = []
        revs = opts.get('rev') or []
        for bookmark in opts.get('bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = 'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                'bookmarks', patterns=scratchbookmarks)
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort('remote bookmark %s not found!' %
                                      bookmark)
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        opts['bookmark'] = bookmarks
        opts['rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(discovery, 'findcommonincoming',
                                _findcommonincoming)
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the pull and restore them
        # after.
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): race condition is possible
        # if scratch bookmarks were updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, 'findcommonincoming')

def _readscratchremotebookmarks(ui, repo, other):
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find('remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure we have them up to date.
        # It seems that `repo.names['remotebookmarks']` may return stale
        # bookmarks, which results in deleting scratch bookmarks. Our best
        # guess at a fix is to use `clearnames()`.
        repo._remotenames.clearnames()
        for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names['remotebookmarks'].nodes(repo,
                                                            remotebookmark)
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}

def _saveremotebookmarks(repo, newbookmarks, remote):
    remotenamesext = extensions.find('remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == 'bookmarks':
            if rname in newbookmarks:
                # It's possible that we have a normal bookmark that matches
                # the scratch branch pattern. In this case just use the
                # current bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == 'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in newbookmarks.iteritems():
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)

def _savelocalbookmarks(repo, bookmarks):
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
        changes = []
        for scratchbook, node in bookmarks.iteritems():
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)

def _findcommonincoming(orig, *args, **kwargs):
    common, inc, remoteheads = orig(*args, **kwargs)
    return common, True, remoteheads

698 def _push(orig, ui, repo, dest=None, *args, **opts):
698 def _push(orig, ui, repo, dest=None, *args, **opts):
699
699
700 bookmark = opts.get(r'bookmark')
700 bookmark = opts.get(r'bookmark')
701 # we only support pushing one infinitepush bookmark at once
701 # we only support pushing one infinitepush bookmark at once
702 if len(bookmark) == 1:
702 if len(bookmark) == 1:
703 bookmark = bookmark[0]
703 bookmark = bookmark[0]
704 else:
704 else:
705 bookmark = ''
705 bookmark = ''
706
706
707 oldphasemove = None
707 oldphasemove = None
708 overrides = {(experimental, configbookmark): bookmark}
708 overrides = {(experimental, configbookmark): bookmark}
709
709
710 with ui.configoverride(overrides, 'infinitepush'):
710 with ui.configoverride(overrides, 'infinitepush'):
711 scratchpush = opts.get('bundle_store')
711 scratchpush = opts.get('bundle_store')
712 if _scratchbranchmatcher(bookmark):
712 if _scratchbranchmatcher(bookmark):
713 scratchpush = True
713 scratchpush = True
714 # bundle2 can be sent back after push (for example, bundle2
714 # bundle2 can be sent back after push (for example, bundle2
715 # containing `pushkey` part to update bookmarks)
715 # containing `pushkey` part to update bookmarks)
716 ui.setconfig(experimental, 'bundle2.pushback', True)
716 ui.setconfig(experimental, 'bundle2.pushback', True)
717
717
718 if scratchpush:
718 if scratchpush:
719             # this is an infinitepush; we don't want the bookmark to be
720             # applied, rather it should be stored in the bundlestore
721             opts[r'bookmark'] = []
722             ui.setconfig(experimental, configscratchpush, True)
723             oldphasemove = extensions.wrapfunction(exchange,
724                                                    '_localphasemove',
725                                                    _phasemove)
726         # Copy-paste from `push` command
727         path = ui.paths.getpath(dest, default=('default-push', 'default'))
728         if not path:
729             raise error.Abort(_('default repository not configured!'),
730                               hint=_("see 'hg help config.paths'"))
731         destpath = path.pushloc or path.loc
732         # Remote scratch bookmarks will be deleted because remotenames doesn't
733         # know about them. Let's save them before the push and restore after.
734         remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
735         result = orig(ui, repo, dest, *args, **opts)
736         if common.isremotebooksenabled(ui):
737             if bookmark and scratchpush:
738                 other = hg.peer(repo, opts, destpath)
739                 fetchedbookmarks = other.listkeyspatterns('bookmarks',
740                                                           patterns=[bookmark])
741                 remotescratchbookmarks.update(fetchedbookmarks)
742             _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
743         if oldphasemove:
744             exchange._localphasemove = oldphasemove
745     return result
746
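For context, a minimal sketch of how a client would install `_push` around the
core push command; the `extsetup` hook shown here is the conventional extension
entry point and is assumed, not part of the hunk above:

    from mercurial import commands, extensions

    def extsetup(ui):
        # wrapcommand hands the original push in as `orig`
        extensions.wrapcommand(commands.table, 'push', _push)
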
747 def _deleteinfinitepushbookmarks(ui, repo, path, names):
748     """Prune remote names by removing the bookmarks we don't want anymore,
749     then writing the result back to disk
750     """
751     remotenamesext = extensions.find('remotenames')
752
753     # remotename format is:
754     # (node, nametype ("branches" or "bookmarks"), remote, name)
755     nametype_idx = 1
756     remote_idx = 2
757     name_idx = 3
758     remotenames = [remotename for remotename in \
759                    remotenamesext.readremotenames(repo) \
760                    if remotename[remote_idx] == path]
761     remote_bm_names = [remotename[name_idx] for remotename in \
762                        remotenames if remotename[nametype_idx] == "bookmarks"]
763
764     for name in names:
765         if name not in remote_bm_names:
766             raise error.Abort(_("infinitepush bookmark '{}' does not exist "
767                                 "in path '{}'").format(name, path))
768
769     bookmarks = {}
770     branches = collections.defaultdict(list)
771     for node, nametype, remote, name in remotenames:
772         if nametype == "bookmarks" and name not in names:
773             bookmarks[name] = node
774         elif nametype == "branches":
775             # saveremotenames wants binary nodes for branches
776             branches[name].append(bin(node))
777
778     remotenamesext.saveremotenames(repo, path, branches, bookmarks)
779
780 def _phasemove(orig, pushop, nodes, phase=phases.public):
781     """prevent commits from being marked public
782
783     Since these are going to a scratch branch, they aren't really being
784     published."""
785
786     if phase != phases.public:
787         orig(pushop, nodes, phase)
788
789 @exchange.b2partsgenerator(scratchbranchparttype)
790 def partgen(pushop, bundler):
791     bookmark = pushop.ui.config(experimental, configbookmark)
792     scratchpush = pushop.ui.configbool(experimental, configscratchpush)
793     if 'changesets' in pushop.stepsdone or not scratchpush:
794         return
795
796     if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
797         return
798
799     pushop.stepsdone.add('changesets')
800     if not pushop.outgoing.missing:
801         pushop.ui.status(_('no changes found\n'))
802         pushop.cgresult = 0
803         return
804
805     # This parameter tells the server that the following bundle is an
806     # infinitepush. This lets it switch the part processing to our
807     # infinitepush code path.
808     bundler.addparam("infinitepush", "True")
809
810     scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
811                                                      pushop.remote,
812                                                      pushop.outgoing,
813                                                      pushop.ui,
814                                                      bookmark)
815
816     for scratchpart in scratchparts:
817         bundler.addpart(scratchpart)
818
819     def handlereply(op):
820         # server either succeeds or aborts; no code to read
821         pushop.cgresult = 1
822
823     return handlereply
824
825 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
826
827 def _getrevs(bundle, oldnode, force, bookmark):
828     'extracts and validates the revs to be imported'
829     revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
830
831     # new bookmark
832     if oldnode is None:
833         return revs
834
835     # Fast forward update
836     if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
837         return revs
838
839     return revs
840
841 @contextlib.contextmanager
842 def logservicecall(logger, service, **kwargs):
843     start = time.time()
844     logger(service, eventtype='start', **kwargs)
845     try:
846         yield
847         logger(service, eventtype='success',
848                elapsedms=(time.time() - start) * 1000, **kwargs)
849     except Exception as e:
850         logger(service, eventtype='failure',
851                elapsedms=(time.time() - start) * 1000, errormsg=str(e),
852                **kwargs)
853         raise
854
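A minimal usage sketch for `logservicecall` (the service name and the extra
keyword are made up; `log` is the partial returned by
`_getorcreateinfinitepushlogger` below):

    log = _getorcreateinfinitepushlogger(op)
    with logservicecall(log, 'examplestore', itemcount=3):
        time.sleep(0)  # stand-in for the real service call being timed
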
855 def _getorcreateinfinitepushlogger(op):
856     logger = op.records['infinitepushlogger']
857     if not logger:
858         ui = op.repo.ui
859         try:
860             username = procutil.getuser()
861         except Exception:
862             username = 'unknown'
863         # Generate a random request id to be able to find all logged
864         # entries for the same request. Since requestid is pseudo-generated,
865         # it may not be unique, but we assume that (hostname, username,
866         # requestid) is unique.
867         random.seed()
868         requestid = random.randint(0, 2000000000)
869         hostname = socket.gethostname()
870         logger = functools.partial(ui.log, 'infinitepush', user=username,
871                                    requestid=requestid, hostname=hostname,
872                                    reponame=ui.config('infinitepush',
873                                                       'reponame'))
874         op.records.add('infinitepushlogger', logger)
875     else:
876         logger = logger[0]
877     return logger
878
879 def storetobundlestore(orig, repo, op, unbundler):
880 """stores the incoming bundle coming from push command to the bundlestore
880 """stores the incoming bundle coming from push command to the bundlestore
881 instead of applying on the revlogs"""
881 instead of applying on the revlogs"""
882
883     repo.ui.status(_("storing changesets on the bundlestore\n"))
884     bundler = bundle2.bundle20(repo.ui)
885
886     # processing each part and storing it in bundler
887     with bundle2.partiterator(repo, op, unbundler) as parts:
888         for part in parts:
889             bundlepart = None
890             if part.type == 'replycaps':
891                 # This configures the current operation to allow reply parts.
892                 bundle2._processpart(op, part)
893             else:
894                 bundlepart = bundle2.bundlepart(part.type, data=part.read())
895                 for key, value in part.params.iteritems():
896                     bundlepart.addparam(key, value)
897
898                 # Certain parts require a response
899                 if part.type in ('pushkey', 'changegroup'):
900                     if op.reply is not None:
901                         rpart = op.reply.newpart('reply:%s' % part.type)
902                         rpart.addparam('in-reply-to', str(part.id),
903                                        mandatory=False)
904                         rpart.addparam('return', '1', mandatory=False)
905
906                 op.records.add(part.type, {
907                     'return': 1,
908                 })
909             if bundlepart:
910                 bundler.addpart(bundlepart)
911
912     # storing the bundle in the bundlestore
913     buf = util.chunkbuffer(bundler.getchunks())
914     fd, bundlefile = tempfile.mkstemp()
915     try:
916         try:
917             fp = os.fdopen(fd, r'wb')
918             fp.write(buf.read())
919         finally:
920             fp.close()
921         storebundle(op, {}, bundlefile)
922     finally:
923         try:
924             os.unlink(bundlefile)
925         except Exception:
926             # we would rather see the original exception
927             pass
928
929 def processparts(orig, repo, op, unbundler):
930
931     # make sure we don't wrap processparts in case of `hg unbundle`
932     if op.source == 'unbundle':
933         return orig(repo, op, unbundler)
934
935     # this server routes each push to the bundle store
936     if repo.ui.configbool('infinitepush', 'pushtobundlestore'):
937         return storetobundlestore(orig, repo, op, unbundler)
938
939     if unbundler.params.get('infinitepush') != 'True':
940         return orig(repo, op, unbundler)
941
942     handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')
943
944     bundler = bundle2.bundle20(repo.ui)
945     cgparams = None
946     with bundle2.partiterator(repo, op, unbundler) as parts:
947         for part in parts:
948             bundlepart = None
949             if part.type == 'replycaps':
950                 # This configures the current operation to allow reply parts.
951                 bundle2._processpart(op, part)
952             elif part.type == bundleparts.scratchbranchparttype:
953                 # Scratch branch parts need to be converted to normal
954                 # changegroup parts, and the extra parameters stored for later
955                 # when we upload to the store. Eventually those parameters will
956                 # be put on the actual bundle instead of this part, then we can
957                 # send a vanilla changegroup instead of the scratchbranch part.
958                 cgversion = part.params.get('cgversion', '01')
959                 bundlepart = bundle2.bundlepart('changegroup', data=part.read())
960                 bundlepart.addparam('version', cgversion)
961                 cgparams = part.params
962
963                 # If we're not dumping all parts into the new bundle, we need to
964                 # alert the future pushkey and phase-heads handler to skip
965                 # the part.
966                 if not handleallparts:
967                     op.records.add(scratchbranchparttype + '_skippushkey', True)
968                     op.records.add(scratchbranchparttype + '_skipphaseheads',
969                                    True)
970             else:
971                 if handleallparts:
972                     # Ideally we would not process any parts, and instead just
973                     # forward them to the bundle for storage, but since this
974                     # differs from previous behavior, we need to put it behind a
975                     # config flag for incremental rollout.
976                     bundlepart = bundle2.bundlepart(part.type, data=part.read())
977                     for key, value in part.params.iteritems():
978                         bundlepart.addparam(key, value)
979
980                     # Certain parts require a response
981                     if part.type == 'pushkey':
982                         if op.reply is not None:
983                             rpart = op.reply.newpart('reply:pushkey')
984                             rpart.addparam('in-reply-to', str(part.id),
985                                            mandatory=False)
986                             rpart.addparam('return', '1', mandatory=False)
987                 else:
988                     bundle2._processpart(op, part)
989
990             if handleallparts:
991                 op.records.add(part.type, {
992                     'return': 1,
993                 })
994             if bundlepart:
995                 bundler.addpart(bundlepart)
996
997     # If commits were sent, store them
998     if cgparams:
999         buf = util.chunkbuffer(bundler.getchunks())
1000         fd, bundlefile = tempfile.mkstemp()
1001         try:
1002             try:
1003                 fp = os.fdopen(fd, r'wb')
1004                 fp.write(buf.read())
1005             finally:
1006                 fp.close()
1007             storebundle(op, cgparams, bundlefile)
1008         finally:
1009             try:
1010                 os.unlink(bundlefile)
1011             except Exception:
1012                 # we would rather see the original exception
1013                 pass
1014
1015 def storebundle(op, params, bundlefile):
1016     log = _getorcreateinfinitepushlogger(op)
1017     parthandlerstart = time.time()
1018     log(scratchbranchparttype, eventtype='start')
1019     index = op.repo.bundlestore.index
1020     store = op.repo.bundlestore.store
1021     op.records.add(scratchbranchparttype + '_skippushkey', True)
1022
1023     bundle = None
1024     try: # guards bundle
1025         bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
1026         bundle = hg.repository(op.repo.ui, bundlepath)
1027
1028         bookmark = params.get('bookmark')
1029         bookprevnode = params.get('bookprevnode', '')
1030         force = params.get('force')
1031
1032         if bookmark:
1033             oldnode = index.getnode(bookmark)
1034         else:
1035             oldnode = None
1036         bundleheads = bundle.revs('heads(bundle())')
1037         if bookmark and len(bundleheads) > 1:
1038             raise error.Abort(
1039                 _('cannot push more than one head to a scratch branch'))
1040
1041         revs = _getrevs(bundle, oldnode, force, bookmark)
1042
1043         # Notify the user of what is being pushed
1044         plural = 's' if len(revs) > 1 else ''
1045         op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
1045         op.repo.ui.warn(_("pushing %d commit%s:\n") % (len(revs), plural))
1046         maxoutput = 10
1047         for i in range(0, min(len(revs), maxoutput)):
1048             firstline = bundle[revs[i]].description().split('\n')[0][:50]
1049             op.repo.ui.warn((" %s %s\n") % (revs[i], firstline))
1050
1051         if len(revs) > maxoutput + 1:
1052             op.repo.ui.warn((" ...\n"))
1053             firstline = bundle[revs[-1]].description().split('\n')[0][:50]
1054             op.repo.ui.warn((" %s %s\n") % (revs[-1], firstline))
1055
1056         nodesctx = [bundle[rev] for rev in revs]
1057         inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1058         if bundleheads:
1059             newheadscount = sum(not inindex(rev) for rev in bundleheads)
1060         else:
1061             newheadscount = 0
1062         # If there's a bookmark specified, there should be only one head,
1063         # so we choose the last node, which will be that head.
1064         # If a bug or malicious client allows there to be a bookmark
1065         # with multiple heads, we will place the bookmark on the last head.
1066         bookmarknode = nodesctx[-1].hex() if nodesctx else None
1067         key = None
1068         if newheadscount:
1069             with open(bundlefile, 'r') as f:
1070                 bundledata = f.read()
1071             with logservicecall(log, 'bundlestore',
1072                                 bundlesize=len(bundledata)):
1073                 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1074                 if len(bundledata) > bundlesizelimit:
1075                     error_msg = ('bundle is too big: %d bytes. ' +
1076                                  'max allowed size is 100 MB')
1077                     raise error.Abort(error_msg % (len(bundledata),))
1078                 key = store.write(bundledata)
1079
1080         with logservicecall(log, 'index', newheadscount=newheadscount), index:
1081             if key:
1082                 index.addbundle(key, nodesctx)
1083             if bookmark:
1084                 index.addbookmark(bookmark, bookmarknode)
1085                 _maybeaddpushbackpart(op, bookmark, bookmarknode,
1086                                       bookprevnode, params)
1087         log(scratchbranchparttype, eventtype='success',
1088             elapsedms=(time.time() - parthandlerstart) * 1000)
1089
1090     except Exception as e:
1091         log(scratchbranchparttype, eventtype='failure',
1092             elapsedms=(time.time() - parthandlerstart) * 1000,
1093             errormsg=str(e))
1094         raise
1095     finally:
1096         if bundle:
1097             bundle.close()
1098
1099 @bundle2.parthandler(scratchbranchparttype,
1100                      ('bookmark', 'bookprevnode', 'force',
1101                       'pushbackbookmarks', 'cgversion'))
1102 def bundle2scratchbranch(op, part):
1103     '''unbundle a bundle2 part containing a changegroup to store'''
1104
1105     bundler = bundle2.bundle20(op.repo.ui)
1106     cgversion = part.params.get('cgversion', '01')
1107     cgpart = bundle2.bundlepart('changegroup', data=part.read())
1108     cgpart.addparam('version', cgversion)
1109     bundler.addpart(cgpart)
1110     buf = util.chunkbuffer(bundler.getchunks())
1111
1112     fd, bundlefile = tempfile.mkstemp()
1113     try:
1114         try:
1115             fp = os.fdopen(fd, r'wb')
1116             fp.write(buf.read())
1117         finally:
1118             fp.close()
1119         storebundle(op, part.params, bundlefile)
1120     finally:
1121         try:
1122             os.unlink(bundlefile)
1123         except OSError as e:
1124             if e.errno != errno.ENOENT:
1125                 raise
1126
1127     return 1
1128
1129 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1130     if params.get('pushbackbookmarks'):
1131         if op.reply and 'pushback' in op.reply.capabilities:
1132             params = {
1133                 'namespace': 'bookmarks',
1134                 'key': bookmark,
1135                 'new': newnode,
1136                 'old': oldnode,
1137             }
1138             op.reply.newpart('pushkey', mandatoryparams=params.iteritems())
1139
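On the client side, bundle2's standard 'pushkey' part handler turns the reply
part above back into an ordinary pushkey call; roughly (a sketch of the
effect, not code from this file):

    # the client's view of the remote bookmark moves to the new node
    repo.pushkey('bookmarks', bookmark, oldnode, newnode)
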
1140 def bundle2pushkey(orig, op, part):
1141     '''Wrapper of bundle2.handlepushkey()
1142
1143     The only goal is to skip calling the original function if the flag is
1144     set. It's set when an infinitepush push is happening.
1145     '''
1146     if op.records[scratchbranchparttype + '_skippushkey']:
1147         if op.reply is not None:
1148             rpart = op.reply.newpart('reply:pushkey')
1149             rpart.addparam('in-reply-to', str(part.id), mandatory=False)
1150             rpart.addparam('return', '1', mandatory=False)
1151         return 1
1152
1153     return orig(op, part)
1154
1155 def bundle2handlephases(orig, op, part):
1156     '''Wrapper of bundle2.handlephases()
1157
1158     The only goal is to skip calling the original function if the flag is
1159     set. It's set when an infinitepush push is happening.
1160     '''
1161
1162     if op.records[scratchbranchparttype + '_skipphaseheads']:
1163         return
1164
1165     return orig(op, part)
1166
1167 def _asyncsavemetadata(root, nodes):
1168     '''starts a separate process that fills in metadata for the nodes
1169
1170     This function creates a separate process and doesn't wait for its
1171     completion. This was done to avoid slowing down pushes.
1172     '''
1173
1174     maxnodes = 50
1175     if len(nodes) > maxnodes:
1176         return
1177     nodesargs = []
1178     for node in nodes:
1179         nodesargs.append('--node')
1180         nodesargs.append(node)
1181     with open(os.devnull, 'w+b') as devnull:
1182         cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
1183                    '-R', root] + nodesargs
1184         # The process runs in the background; we don't care about the return code
1185         subprocess.Popen(cmdline, close_fds=True, shell=False,
1186                          stdin=devnull, stdout=devnull, stderr=devnull)
@@ -1,1141 +1,1141 b''
1 # hg.py - repository classes for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 from __future__ import absolute_import
10
11 import errno
12 import hashlib
13 import os
14 import shutil
15 import stat
16
17 from .i18n import _
18 from .node import (
19     nullid,
20 )
21
22 from . import (
23     bookmarks,
24     bundlerepo,
25     cacheutil,
26     cmdutil,
27     destutil,
28     discovery,
29     error,
30     exchange,
31     extensions,
32     httppeer,
33     localrepo,
34     lock,
35     logcmdutil,
36     logexchange,
37     merge as mergemod,
38     node,
39     phases,
40     scmutil,
41     sshpeer,
42     statichttprepo,
43     ui as uimod,
44     unionrepo,
45     url,
46     util,
47     verify as verifymod,
48     vfs as vfsmod,
49 )
50
51 from .utils import (
52     stringutil,
53 )
54
55 release = lock.release
56
57 # shared features
58 sharedbookmarks = 'bookmarks'
59
60 def _local(path):
61     path = util.expandpath(util.urllocalpath(path))
62     return (os.path.isfile(path) and bundlerepo or localrepo)
63
64 def addbranchrevs(lrepo, other, branches, revs):
65     peer = other.peer() # a courtesy to callers using a localrepo for other
66     hashbranch, branches = branches
67     if not hashbranch and not branches:
68         x = revs or None
69         if revs:
70             y = revs[0]
71         else:
72             y = None
73         return x, y
74     if revs:
75         revs = list(revs)
76     else:
77         revs = []
78
79     if not peer.capable('branchmap'):
80         if branches:
81             raise error.Abort(_("remote branch lookup not supported"))
82         revs.append(hashbranch)
83         return revs, revs[0]
84     branchmap = peer.branchmap()
85
86     def primary(branch):
87         if branch == '.':
88             if not lrepo:
89                 raise error.Abort(_("dirstate branch not accessible"))
90             branch = lrepo.dirstate.branch()
91         if branch in branchmap:
92             revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
93             return True
94         else:
95             return False
96
97     for branch in branches:
98         if not primary(branch):
99             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
100     if hashbranch:
101         if not primary(hashbranch):
102             revs.append(hashbranch)
103     return revs, revs[0]
104
105 def parseurl(path, branches=None):
106     '''parse url#branch, returning (url, (branch, branches))'''
107
108     u = util.url(path)
109     branch = None
110     if u.fragment:
111         branch = u.fragment
112         u.fragment = None
113     return bytes(u), (branch, branches or [])
114
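A couple of doctest-style examples of the returned shape, in the spirit of the
`defaultdest` doctests below (illustrative, not part of the original file):

    >>> parseurl(b'http://example.org/repo#stable')
    ('http://example.org/repo', ('stable', []))
    >>> parseurl(b'http://example.org/repo', [b'default'])
    ('http://example.org/repo', (None, ['default']))
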
115 schemes = {
116     'bundle': bundlerepo,
117     'union': unionrepo,
118     'file': _local,
119     'http': httppeer,
120     'https': httppeer,
121     'ssh': sshpeer,
122     'static-http': statichttprepo,
123 }
124
125 def _peerlookup(path):
126     u = util.url(path)
127     scheme = u.scheme or 'file'
128     thing = schemes.get(scheme) or schemes['file']
129     try:
130         return thing(path)
131     except TypeError:
132         # we can't test callable(thing) because 'thing' can be an unloaded
133         # module that implements __call__
134         if not util.safehasattr(thing, 'instance'):
135             raise
136         return thing
137
138 def islocal(repo):
139     '''return true if repo (or path pointing to repo) is local'''
140     if isinstance(repo, bytes):
141         try:
142             return _peerlookup(repo).islocal(repo)
143         except AttributeError:
144             return False
145     return repo.local()
146
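A sketch of the dispatch behaviour (the paths are made up; the ssh/http peer
modules provide no module-level islocal(), which is what the AttributeError
fallback above catches):

    >>> islocal(b'/some/local/repo')      # 'file' scheme -> localrepo
    True
    >>> islocal(b'ssh://user@host/repo')  # sshpeer has no islocal()
    False
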
147 def openpath(ui, path):
148     '''open path with open if local, url.open if remote'''
149     pathurl = util.url(path, parsequery=False, parsefragment=False)
150     if pathurl.islocal():
151         return util.posixfile(pathurl.localpath(), 'rb')
152     else:
153         return url.open(ui, path)
154
155 # a list of (ui, repo) functions called for wire peer initialization
156 wirepeersetupfuncs = []
157
158 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
159     """return a repository object for the specified path"""
160     obj = _peerlookup(path).instance(ui, path, create)
161     ui = getattr(obj, "ui", ui)
162     for f in presetupfuncs or []:
163         f(ui, obj)
164     for name, module in extensions.extensions(ui):
165         hook = getattr(module, 'reposetup', None)
166         if hook:
167             hook(ui, obj)
168     if not obj.local():
169         for f in wirepeersetupfuncs:
170             f(ui, obj)
171     return obj
172
173 def repository(ui, path='', create=False, presetupfuncs=None):
174     """return a repository object for the specified path"""
175     peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
176     repo = peer.local()
177     if not repo:
178         raise error.Abort(_("repository '%s' is not local") %
179                           (path or peer.url()))
180     return repo.filtered('visible')
181
182 def peer(uiorrepo, opts, path, create=False):
183     '''return a repository peer for the specified path'''
184     rui = remoteui(uiorrepo, opts)
185     return _peerorrepo(rui, path, create).peer()
186
187 def defaultdest(source):
188     '''return default destination of clone if none is given
189
190     >>> defaultdest(b'foo')
191     'foo'
192     >>> defaultdest(b'/foo/bar')
193     'bar'
194     >>> defaultdest(b'/')
195     ''
196     >>> defaultdest(b'')
197     ''
198     >>> defaultdest(b'http://example.org/')
199     ''
200     >>> defaultdest(b'http://example.org/foo/')
201     'foo'
202     '''
203     path = util.url(source).path
204     if not path:
205         return ''
206     return os.path.basename(os.path.normpath(path))
207
208 def sharedreposource(repo):
209     """Returns repository object for source repository of a shared repo.
210
211     If repo is not a shared repository, returns None.
212     """
213     if repo.sharedpath == repo.path:
214         return None
215
216     if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
217         return repo.srcrepo
218
219     # the sharedpath always ends in the .hg; we want the path to the repo
220     source = repo.vfs.split(repo.sharedpath)[0]
221     srcurl, branches = parseurl(source)
222     srcrepo = repository(repo.ui, srcurl)
223     repo.srcrepo = srcrepo
224     return srcrepo
225
226 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
227           relative=False):
228     '''create a shared repository'''
229
230     if not islocal(source):
231         raise error.Abort(_('can only share local repositories'))
232
233     if not dest:
234         dest = defaultdest(source)
235     else:
236         dest = ui.expandpath(dest)
237
238     if isinstance(source, bytes):
239         origsource = ui.expandpath(source)
240         source, branches = parseurl(origsource)
241         srcrepo = repository(ui, source)
242         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
243     else:
244         srcrepo = source.local()
245         origsource = source = srcrepo.url()
246         checkout = None
247
248     sharedpath = srcrepo.sharedpath # if our source is already sharing
249
250     destwvfs = vfsmod.vfs(dest, realpath=True)
251     destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
252
253     if destvfs.lexists():
254         raise error.Abort(_('destination already exists'))
255
256     if not destwvfs.isdir():
257         destwvfs.mkdir()
258     destvfs.makedir()
259
260     requirements = ''
261     try:
262         requirements = srcrepo.vfs.read('requires')
263     except IOError as inst:
264         if inst.errno != errno.ENOENT:
265             raise
266
267     if relative:
268         try:
269             sharedpath = os.path.relpath(sharedpath, destvfs.base)
270             requirements += 'relshared\n'
271         except (IOError, ValueError) as e:
272             # ValueError is raised on Windows if the drive letters differ on
273             # each path
274             raise error.Abort(_('cannot calculate relative path'),
275                               hint=stringutil.forcebytestr(e))
276     else:
277         requirements += 'shared\n'
278
279     destvfs.write('requires', requirements)
280     destvfs.write('sharedpath', sharedpath)
281
282     r = repository(ui, destwvfs.base)
283     postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
284     _postshareupdate(r, update, checkout=checkout)
285     return r
286
287 def unshare(ui, repo):
288     """convert a shared repository to a normal one
289
290     Copy the store data to the repo and remove the sharedpath data.
291     """
292
293     destlock = lock = None
294     lock = repo.lock()
295     try:
296         # we use locks here because if we race with commit, we
297         # can end up with extra data in the cloned revlogs that's
298         # not pointed to by changesets, thus causing verify to
299         # fail
300
301         destlock = copystore(ui, repo, repo.path)
302
303         sharefile = repo.vfs.join('sharedpath')
304         util.rename(sharefile, sharefile + '.old')
305
306         repo.requirements.discard('shared')
307         repo.requirements.discard('relshared')
308         repo._writerequirements()
309     finally:
310         destlock and destlock.release()
311         lock and lock.release()
312
313     # update store, spath, svfs and sjoin of repo
314     repo.unfiltered().__init__(repo.baseui, repo.root)
315
316     # TODO: figure out how to access subrepos that exist, but were previously
317     # removed from .hgsub
318     c = repo['.']
319     subs = c.substate
320     for s in sorted(subs):
321         c.sub(s).unshare()
322
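A minimal usage sketch tying share() and unshare() together (the paths are
made up, and error handling is elided):

    r = share(ui, b'/repos/main', dest=b'/repos/worktree', update=False)
    # ... work in the share, then detach it into a full standalone repo:
    unshare(ui, r)
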
323 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
324     """Called after a new shared repo is created.
325
326     The new repo only has a requirements file and pointer to the source.
327     This function configures additional shared data.
328
329     Extensions can wrap this function and write additional entries to
330     destrepo/.hg/shared to indicate additional pieces of data to be shared.
331     """
332     default = defaultpath or sourcerepo.ui.config('paths', 'default')
333     if default:
334         template = ('[paths]\n'
335                     'default = %s\n')
336         destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
337
338     with destrepo.wlock():
339         if bookmarks:
340             destrepo.vfs.write('shared', sharedbookmarks + '\n')
341
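As the docstring invites, an extension can wrap postshare() to advertise an
extra shared component; a minimal sketch (the wrapper name and the 'myfeature'
entry are hypothetical):

    def wrappedpostshare(orig, sourcerepo, destrepo, **kwargs):
        orig(sourcerepo, destrepo, **kwargs)
        with destrepo.wlock():
            # append one more entry to destrepo/.hg/shared
            existing = destrepo.vfs.tryread('shared')
            destrepo.vfs.write('shared', existing + 'myfeature\n')

    # in the extension's extsetup():
    # extensions.wrapfunction(hg, 'postshare', wrappedpostshare)
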
342 def _postshareupdate(repo, update, checkout=None):
343     """Maybe perform a working directory update after a shared repo is created.
344
345     ``update`` can be a boolean or a revision to update to.
346     """
347     if not update:
348         return
349
350     repo.ui.status(_("updating working directory\n"))
351     if update is not True:
352         checkout = update
353     for test in (checkout, 'default', 'tip'):
354         if test is None:
355             continue
356         try:
357             uprev = repo.lookup(test)
358             break
359         except error.RepoLookupError:
360             continue
361     _update(repo, uprev)
362
363 def copystore(ui, srcrepo, destpath):
364     '''copy files from store of srcrepo in destpath
365
366     returns destlock
367     '''
368     destlock = None
369     try:
370         hardlink = None
371         num = 0
372         closetopic = [None]
373         def prog(topic, pos):
374             if pos is None:
375                 closetopic[0] = topic
376             else:
377                 ui.progress(topic, pos + num)
378         srcpublishing = srcrepo.publishing()
379         srcvfs = vfsmod.vfs(srcrepo.sharedpath)
380         dstvfs = vfsmod.vfs(destpath)
381         for f in srcrepo.store.copylist():
382             if srcpublishing and f.endswith('phaseroots'):
383                 continue
384             dstbase = os.path.dirname(f)
385             if dstbase and not dstvfs.exists(dstbase):
386                 dstvfs.mkdir(dstbase)
387             if srcvfs.exists(f):
388                 if f.endswith('data'):
389                     # 'dstbase' may be empty (e.g. revlog format 0)
390                     lockfile = os.path.join(dstbase, "lock")
391                     # lock to avoid premature writing to the target
392                     destlock = lock.lock(dstvfs, lockfile)
393                 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
394                                              hardlink, progress=prog)
395                 num += n
396         if hardlink:
397             ui.debug("linked %d files\n" % num)
398             if closetopic[0]:
399                 ui.progress(closetopic[0], None)
400         else:
401             ui.debug("copied %d files\n" % num)
402             if closetopic[0]:
403                 ui.progress(closetopic[0], None)
404         return destlock
405     except: # re-raises
406         release(destlock)
407         raise
408
409 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
410                    rev=None, update=True, stream=False):
411     """Perform a clone using a shared repo.
412
413     The store for the repository will be located at <sharepath>/.hg. The
414     specified revisions will be cloned or pulled from "source". A shared repo
415     will be created at "dest" and a working copy will be created if "update" is
416     True.
417     """
418     revs = None
419     if rev:
420         if not srcpeer.capable('lookup'):
421             raise error.Abort(_("src repository does not support "
422                                 "revision lookup and so doesn't "
423                                 "support clone by revision"))
424         revs = [srcpeer.lookup(r) for r in rev]
425
426 # Obtain a lock before checking for or cloning the pooled repo otherwise
426 # Obtain a lock before checking for or cloning the pooled repo otherwise
427 # 2 clients may race creating or populating it.
427 # 2 clients may race creating or populating it.
428 pooldir = os.path.dirname(sharepath)
428 pooldir = os.path.dirname(sharepath)
429 # lock class requires the directory to exist.
429 # lock class requires the directory to exist.
430 try:
430 try:
431 util.makedir(pooldir, False)
431 util.makedir(pooldir, False)
432 except OSError as e:
432 except OSError as e:
433 if e.errno != errno.EEXIST:
433 if e.errno != errno.EEXIST:
434 raise
434 raise
435
435
436 poolvfs = vfsmod.vfs(pooldir)
436 poolvfs = vfsmod.vfs(pooldir)
437 basename = os.path.basename(sharepath)
437 basename = os.path.basename(sharepath)
438
438
439 with lock.lock(poolvfs, '%s.lock' % basename):
439 with lock.lock(poolvfs, '%s.lock' % basename):
440 if os.path.exists(sharepath):
440 if os.path.exists(sharepath):
441 ui.status(_('(sharing from existing pooled repository %s)\n') %
441 ui.status(_('(sharing from existing pooled repository %s)\n') %
442 basename)
442 basename)
443 else:
443 else:
444 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
444 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
445 # Always use pull mode because hardlinks in share mode don't work
445 # Always use pull mode because hardlinks in share mode don't work
446 # well. Never update because working copies aren't necessary in
446 # well. Never update because working copies aren't necessary in
447 # share mode.
447 # share mode.
448 clone(ui, peeropts, source, dest=sharepath, pull=True,
448 clone(ui, peeropts, source, dest=sharepath, pull=True,
449 revs=rev, update=False, stream=stream)
449 revs=rev, update=False, stream=stream)
450
450
451 # Resolve the value to put in [paths] section for the source.
451 # Resolve the value to put in [paths] section for the source.
452 if islocal(source):
452 if islocal(source):
453 defaultpath = os.path.abspath(util.urllocalpath(source))
453 defaultpath = os.path.abspath(util.urllocalpath(source))
454 else:
454 else:
455 defaultpath = source
455 defaultpath = source
456
456
457 sharerepo = repository(ui, path=sharepath)
457 sharerepo = repository(ui, path=sharepath)
458 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
458 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
459 defaultpath=defaultpath)
459 defaultpath=defaultpath)
460
460
461 # We need to perform a pull against the dest repo to fetch bookmarks
461 # We need to perform a pull against the dest repo to fetch bookmarks
462 # and other non-store data that isn't shared by default. In the case of
462 # and other non-store data that isn't shared by default. In the case of
463 # non-existing shared repo, this means we pull from the remote twice. This
463 # non-existing shared repo, this means we pull from the remote twice. This
464 # is a bit weird. But at the time it was implemented, there wasn't an easy
464 # is a bit weird. But at the time it was implemented, there wasn't an easy
465 # way to pull just non-changegroup data.
465 # way to pull just non-changegroup data.
466 destrepo = repository(ui, path=dest)
466 destrepo = repository(ui, path=dest)
467 exchange.pull(destrepo, srcpeer, heads=revs)
467 exchange.pull(destrepo, srcpeer, heads=revs)
468
468
469 _postshareupdate(destrepo, update)
469 _postshareupdate(destrepo, update)
470
470
471 return srcpeer, peer(ui, peeropts, dest)
471 return srcpeer, peer(ui, peeropts, dest)
472
472
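# Editor's sketch (not part of hg.py): the race-safe pattern used above,
# distilled -- create the pool directory while tolerating the EEXIST race,
# then serialize on a per-repository lock file before checking for or
# populating the shared store. 'pooldir' and 'basename' are assumed inputs.
def _example_poollock(pooldir, basename):
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    poolvfs = vfsmod.vfs(pooldir)
    with lock.lock(poolvfs, '%s.lock' % basename):
        pass # safe to inspect or populate the pooled repository here
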
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to dstcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity".
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in revs]
                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported"))

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    if checkout in destrepo:
                        uprev = checkout
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

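# Editor's sketch (not part of hg.py): driving clone() with auto sharing
# enabled, per the shareopts documentation above. The URL and pool path are
# made-up examples; peeropts is left empty.
def _example_pooledclone(ui):
    return clone(ui, {}, 'https://example.com/repo', dest='repo',
                 shareopts={'pool': '/tmp/hgpool', 'mode': 'identity'})
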
def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are merged.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

# naming conflict in clone()
_update = update

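# Editor's sketch (not part of hg.py): update() returns True when unresolved
# conflicts remain, so callers typically branch on the result.
def _example_update(repo, node):
    if update(repo, node):
        repo.ui.warn(_("update left unresolved conflicts\n"))
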
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of the non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes the arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether a conflict was detected during the update.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

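# Editor's sketch (not part of hg.py): updatecheck falls back to the
# 'commands.update.check' config knob, so these two calls should behave the
# same; 'noconflict' is one of the values documented above.
def _example_updatetotally(ui, repo, checkout):
    updatetotally(ui, repo, checkout, None, updatecheck='noconflict')
    ui.setconfig('commands', 'update.check', 'noconflict', 'example')
    updatetotally(ui, repo, checkout, None)
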
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

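# Editor's sketch (not part of hg.py): the two modes of merge() above -- a
# regular branch merge against a node and, purely for illustration, an
# abort=True call that backs out again (the node argument is ignored when
# aborting, since the target is read from the merge state).
def _example_merge(repo, node):
    if merge(repo, node):
        merge(repo, None, abort=True)
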
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

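# Editor's sketch (not part of hg.py): the callback contract of _incoming()
# above. displaychlist receives the remote repo, the list of incoming
# changesets and a displayer, and only has to render them; everything else
# (peer setup, bundle download, cleanup) is shared.
def _example_incoming(ui, repo, source, opts):
    def subreporecurse():
        return 1
    def displaychlist(other, chlist, displayer):
        for n in chlist:
            displayer.show(other[n])
    return _incoming(displaychlist, subreporecurse, ui, repo, source, opts)
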
def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

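# Editor's sketch (not part of hg.py): verify() returns a truthy value when
# problems were found, which callers can turn into a hard failure.
def _example_verify(repo):
    if verify(repo):
        raise error.Abort(_("repository failed verification"))
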
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

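# Editor's sketch (not part of hg.py): remoteui() accepts either a repo or a
# plain ui as 'src'; only the whitelisted slices of local configuration
# (ssh, auth, proxy, certificates) make it into the result.
def _example_remoteui(repo):
    remote = remoteui(repo, {'ssh': 'ssh -C'})
    return remote.config('ui', 'ssh') # -> 'ssh -C'
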
# Files of interest
# Used to check if the repository has changed, looking at the mtime and size
# of these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
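
# Editor's sketch (not part of hg.py): the cache protocol above. fetch()
# revalidates against the files-of-interest state and reports whether a
# fresh localrepository instance had to be created.
def _example_cachedrepo(repo):
    cache = cachedlocalrepo(repo)
    repo, created = cache.fetch()
    if created:
        repo.ui.debug("repository instance was recreated\n")
    return repo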