wrap string literals in error messages
Martin Geisler
r8663:45f626a3 default
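The change itself is mechanical: long error-message literals are wrapped across source lines using Python's implicit concatenation of adjacent string literals, so the message the user sees is unchanged while each source line stays within the usual width limit. A minimal sketch of the pattern (the message text is simply the example from the first hunk below):

    # Adjacent string literals inside parentheses are joined at compile time,
    # so both spellings produce the same exception message.
    one_liner = "This function is only intended to be called while running as a CGI script."
    wrapped = ("This function is only intended to be "
               "called while running as a CGI script.")
    assert wrapped == one_liner

    # Typical use in an error path, as in hgweb_mod.py below:
    # raise RuntimeError("This function is only intended to be "
    #                    "called while running as a CGI script.")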
@@ -1,311 +1,312 @@
1 # hgweb/hgweb_mod.py - Web interface for a repository.
1 # hgweb/hgweb_mod.py - Web interface for a repository.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2, incorporated herein by reference.
7 # GNU General Public License version 2, incorporated herein by reference.
8
8
9 import os
9 import os
10 from mercurial import ui, hg, hook, error, encoding, templater
10 from mercurial import ui, hg, hook, error, encoding, templater
11 from common import get_mtime, ErrorResponse
11 from common import get_mtime, ErrorResponse
12 from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
12 from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
13 from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED
13 from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED
14 from request import wsgirequest
14 from request import wsgirequest
15 import webcommands, protocol, webutil
15 import webcommands, protocol, webutil
16
16
17 perms = {
17 perms = {
18 'changegroup': 'pull',
18 'changegroup': 'pull',
19 'changegroupsubset': 'pull',
19 'changegroupsubset': 'pull',
20 'unbundle': 'push',
20 'unbundle': 'push',
21 'stream_out': 'pull',
21 'stream_out': 'pull',
22 }
22 }
23
23
24 class hgweb(object):
24 class hgweb(object):
25 def __init__(self, repo, name=None):
25 def __init__(self, repo, name=None):
26 if isinstance(repo, str):
26 if isinstance(repo, str):
27 u = ui.ui()
27 u = ui.ui()
28 u.setconfig('ui', 'report_untrusted', 'off')
28 u.setconfig('ui', 'report_untrusted', 'off')
29 u.setconfig('ui', 'interactive', 'off')
29 u.setconfig('ui', 'interactive', 'off')
30 self.repo = hg.repository(u, repo)
30 self.repo = hg.repository(u, repo)
31 else:
31 else:
32 self.repo = repo
32 self.repo = repo
33
33
34 hook.redirect(True)
34 hook.redirect(True)
35 self.mtime = -1
35 self.mtime = -1
36 self.reponame = name
36 self.reponame = name
37 self.archives = 'zip', 'gz', 'bz2'
37 self.archives = 'zip', 'gz', 'bz2'
38 self.stripecount = 1
38 self.stripecount = 1
39 # a repo owner may set web.templates in .hg/hgrc to get any file
39 # a repo owner may set web.templates in .hg/hgrc to get any file
40 # readable by the user running the CGI script
40 # readable by the user running the CGI script
41 self.templatepath = self.config('web', 'templates')
41 self.templatepath = self.config('web', 'templates')
42
42
43 # The CGI scripts are often run by a user different from the repo owner.
43 # The CGI scripts are often run by a user different from the repo owner.
44 # Trust the settings from the .hg/hgrc files by default.
44 # Trust the settings from the .hg/hgrc files by default.
45 def config(self, section, name, default=None, untrusted=True):
45 def config(self, section, name, default=None, untrusted=True):
46 return self.repo.ui.config(section, name, default,
46 return self.repo.ui.config(section, name, default,
47 untrusted=untrusted)
47 untrusted=untrusted)
48
48
49 def configbool(self, section, name, default=False, untrusted=True):
49 def configbool(self, section, name, default=False, untrusted=True):
50 return self.repo.ui.configbool(section, name, default,
50 return self.repo.ui.configbool(section, name, default,
51 untrusted=untrusted)
51 untrusted=untrusted)
52
52
53 def configlist(self, section, name, default=None, untrusted=True):
53 def configlist(self, section, name, default=None, untrusted=True):
54 return self.repo.ui.configlist(section, name, default,
54 return self.repo.ui.configlist(section, name, default,
55 untrusted=untrusted)
55 untrusted=untrusted)
56
56
57 def refresh(self):
57 def refresh(self):
58 mtime = get_mtime(self.repo.root)
58 mtime = get_mtime(self.repo.root)
59 if mtime != self.mtime:
59 if mtime != self.mtime:
60 self.mtime = mtime
60 self.mtime = mtime
61 self.repo = hg.repository(self.repo.ui, self.repo.root)
61 self.repo = hg.repository(self.repo.ui, self.repo.root)
62 self.maxchanges = int(self.config("web", "maxchanges", 10))
62 self.maxchanges = int(self.config("web", "maxchanges", 10))
63 self.stripecount = int(self.config("web", "stripes", 1))
63 self.stripecount = int(self.config("web", "stripes", 1))
64 self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
64 self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
65 self.maxfiles = int(self.config("web", "maxfiles", 10))
65 self.maxfiles = int(self.config("web", "maxfiles", 10))
66 self.allowpull = self.configbool("web", "allowpull", True)
66 self.allowpull = self.configbool("web", "allowpull", True)
67 self.encoding = self.config("web", "encoding", encoding.encoding)
67 self.encoding = self.config("web", "encoding", encoding.encoding)
68
68
69 def run(self):
69 def run(self):
70 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
70 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
71 raise RuntimeError("This function is only intended to be called while running as a CGI script.")
71 raise RuntimeError("This function is only intended to be "
72 "called while running as a CGI script.")
72 import mercurial.hgweb.wsgicgi as wsgicgi
73 import mercurial.hgweb.wsgicgi as wsgicgi
73 wsgicgi.launch(self)
74 wsgicgi.launch(self)
74
75
75 def __call__(self, env, respond):
76 def __call__(self, env, respond):
76 req = wsgirequest(env, respond)
77 req = wsgirequest(env, respond)
77 return self.run_wsgi(req)
78 return self.run_wsgi(req)
78
79
79 def run_wsgi(self, req):
80 def run_wsgi(self, req):
80
81
81 self.refresh()
82 self.refresh()
82
83
83 # process this if it's a protocol request
84 # process this if it's a protocol request
84 # protocol bits don't need to create any URLs
85 # protocol bits don't need to create any URLs
85 # and the clients always use the old URL structure
86 # and the clients always use the old URL structure
86
87
87 cmd = req.form.get('cmd', [''])[0]
88 cmd = req.form.get('cmd', [''])[0]
88 if cmd and cmd in protocol.__all__:
89 if cmd and cmd in protocol.__all__:
89 try:
90 try:
90 if cmd in perms:
91 if cmd in perms:
91 try:
92 try:
92 self.check_perm(req, perms[cmd])
93 self.check_perm(req, perms[cmd])
93 except ErrorResponse, inst:
94 except ErrorResponse, inst:
94 if cmd == 'unbundle':
95 if cmd == 'unbundle':
95 req.drain()
96 req.drain()
96 raise
97 raise
97 method = getattr(protocol, cmd)
98 method = getattr(protocol, cmd)
98 return method(self.repo, req)
99 return method(self.repo, req)
99 except ErrorResponse, inst:
100 except ErrorResponse, inst:
100 req.respond(inst, protocol.HGTYPE)
101 req.respond(inst, protocol.HGTYPE)
101 if not inst.message:
102 if not inst.message:
102 return []
103 return []
103 return '0\n%s\n' % inst.message,
104 return '0\n%s\n' % inst.message,
104
105
105 # work with CGI variables to create coherent structure
106 # work with CGI variables to create coherent structure
106 # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
107 # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
107
108
108 req.url = req.env['SCRIPT_NAME']
109 req.url = req.env['SCRIPT_NAME']
109 if not req.url.endswith('/'):
110 if not req.url.endswith('/'):
110 req.url += '/'
111 req.url += '/'
111 if 'REPO_NAME' in req.env:
112 if 'REPO_NAME' in req.env:
112 req.url += req.env['REPO_NAME'] + '/'
113 req.url += req.env['REPO_NAME'] + '/'
113
114
114 if 'PATH_INFO' in req.env:
115 if 'PATH_INFO' in req.env:
115 parts = req.env['PATH_INFO'].strip('/').split('/')
116 parts = req.env['PATH_INFO'].strip('/').split('/')
116 repo_parts = req.env.get('REPO_NAME', '').split('/')
117 repo_parts = req.env.get('REPO_NAME', '').split('/')
117 if parts[:len(repo_parts)] == repo_parts:
118 if parts[:len(repo_parts)] == repo_parts:
118 parts = parts[len(repo_parts):]
119 parts = parts[len(repo_parts):]
119 query = '/'.join(parts)
120 query = '/'.join(parts)
120 else:
121 else:
121 query = req.env['QUERY_STRING'].split('&', 1)[0]
122 query = req.env['QUERY_STRING'].split('&', 1)[0]
122 query = query.split(';', 1)[0]
123 query = query.split(';', 1)[0]
123
124
124 # translate user-visible url structure to internal structure
125 # translate user-visible url structure to internal structure
125
126
126 args = query.split('/', 2)
127 args = query.split('/', 2)
127 if 'cmd' not in req.form and args and args[0]:
128 if 'cmd' not in req.form and args and args[0]:
128
129
129 cmd = args.pop(0)
130 cmd = args.pop(0)
130 style = cmd.rfind('-')
131 style = cmd.rfind('-')
131 if style != -1:
132 if style != -1:
132 req.form['style'] = [cmd[:style]]
133 req.form['style'] = [cmd[:style]]
133 cmd = cmd[style+1:]
134 cmd = cmd[style+1:]
134
135
135 # avoid accepting e.g. style parameter as command
136 # avoid accepting e.g. style parameter as command
136 if hasattr(webcommands, cmd):
137 if hasattr(webcommands, cmd):
137 req.form['cmd'] = [cmd]
138 req.form['cmd'] = [cmd]
138 else:
139 else:
139 cmd = ''
140 cmd = ''
140
141
141 if cmd == 'static':
142 if cmd == 'static':
142 req.form['file'] = ['/'.join(args)]
143 req.form['file'] = ['/'.join(args)]
143 else:
144 else:
144 if args and args[0]:
145 if args and args[0]:
145 node = args.pop(0)
146 node = args.pop(0)
146 req.form['node'] = [node]
147 req.form['node'] = [node]
147 if args:
148 if args:
148 req.form['file'] = args
149 req.form['file'] = args
149
150
150 if cmd == 'archive':
151 if cmd == 'archive':
151 fn = req.form['node'][0]
152 fn = req.form['node'][0]
152 for type_, spec in self.archive_specs.iteritems():
153 for type_, spec in self.archive_specs.iteritems():
153 ext = spec[2]
154 ext = spec[2]
154 if fn.endswith(ext):
155 if fn.endswith(ext):
155 req.form['node'] = [fn[:-len(ext)]]
156 req.form['node'] = [fn[:-len(ext)]]
156 req.form['type'] = [type_]
157 req.form['type'] = [type_]
157
158
158 # process the web interface request
159 # process the web interface request
159
160
160 try:
161 try:
161 tmpl = self.templater(req)
162 tmpl = self.templater(req)
162 ctype = tmpl('mimetype', encoding=self.encoding)
163 ctype = tmpl('mimetype', encoding=self.encoding)
163 ctype = templater.stringify(ctype)
164 ctype = templater.stringify(ctype)
164
165
165 # check read permissions non-static content
166 # check read permissions non-static content
166 if cmd != 'static':
167 if cmd != 'static':
167 self.check_perm(req, None)
168 self.check_perm(req, None)
168
169
169 if cmd == '':
170 if cmd == '':
170 req.form['cmd'] = [tmpl.cache['default']]
171 req.form['cmd'] = [tmpl.cache['default']]
171 cmd = req.form['cmd'][0]
172 cmd = req.form['cmd'][0]
172
173
173 if cmd not in webcommands.__all__:
174 if cmd not in webcommands.__all__:
174 msg = 'no such method: %s' % cmd
175 msg = 'no such method: %s' % cmd
175 raise ErrorResponse(HTTP_BAD_REQUEST, msg)
176 raise ErrorResponse(HTTP_BAD_REQUEST, msg)
176 elif cmd == 'file' and 'raw' in req.form.get('style', []):
177 elif cmd == 'file' and 'raw' in req.form.get('style', []):
177 self.ctype = ctype
178 self.ctype = ctype
178 content = webcommands.rawfile(self, req, tmpl)
179 content = webcommands.rawfile(self, req, tmpl)
179 else:
180 else:
180 content = getattr(webcommands, cmd)(self, req, tmpl)
181 content = getattr(webcommands, cmd)(self, req, tmpl)
181 req.respond(HTTP_OK, ctype)
182 req.respond(HTTP_OK, ctype)
182
183
183 return content
184 return content
184
185
185 except error.LookupError, err:
186 except error.LookupError, err:
186 req.respond(HTTP_NOT_FOUND, ctype)
187 req.respond(HTTP_NOT_FOUND, ctype)
187 msg = str(err)
188 msg = str(err)
188 if 'manifest' not in msg:
189 if 'manifest' not in msg:
189 msg = 'revision not found: %s' % err.name
190 msg = 'revision not found: %s' % err.name
190 return tmpl('error', error=msg)
191 return tmpl('error', error=msg)
191 except (error.RepoError, error.RevlogError), inst:
192 except (error.RepoError, error.RevlogError), inst:
192 req.respond(HTTP_SERVER_ERROR, ctype)
193 req.respond(HTTP_SERVER_ERROR, ctype)
193 return tmpl('error', error=str(inst))
194 return tmpl('error', error=str(inst))
194 except ErrorResponse, inst:
195 except ErrorResponse, inst:
195 req.respond(inst, ctype)
196 req.respond(inst, ctype)
196 return tmpl('error', error=inst.message)
197 return tmpl('error', error=inst.message)
197
198
198 def templater(self, req):
199 def templater(self, req):
199
200
200 # determine scheme, port and server name
201 # determine scheme, port and server name
201 # this is needed to create absolute urls
202 # this is needed to create absolute urls
202
203
203 proto = req.env.get('wsgi.url_scheme')
204 proto = req.env.get('wsgi.url_scheme')
204 if proto == 'https':
205 if proto == 'https':
205 proto = 'https'
206 proto = 'https'
206 default_port = "443"
207 default_port = "443"
207 else:
208 else:
208 proto = 'http'
209 proto = 'http'
209 default_port = "80"
210 default_port = "80"
210
211
211 port = req.env["SERVER_PORT"]
212 port = req.env["SERVER_PORT"]
212 port = port != default_port and (":" + port) or ""
213 port = port != default_port and (":" + port) or ""
213 urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
214 urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
214 staticurl = self.config("web", "staticurl") or req.url + 'static/'
215 staticurl = self.config("web", "staticurl") or req.url + 'static/'
215 if not staticurl.endswith('/'):
216 if not staticurl.endswith('/'):
216 staticurl += '/'
217 staticurl += '/'
217
218
218 # some functions for the templater
219 # some functions for the templater
219
220
220 def header(**map):
221 def header(**map):
221 yield tmpl('header', encoding=self.encoding, **map)
222 yield tmpl('header', encoding=self.encoding, **map)
222
223
223 def footer(**map):
224 def footer(**map):
224 yield tmpl("footer", **map)
225 yield tmpl("footer", **map)
225
226
226 def motd(**map):
227 def motd(**map):
227 yield self.config("web", "motd", "")
228 yield self.config("web", "motd", "")
228
229
229 # figure out which style to use
230 # figure out which style to use
230
231
231 vars = {}
232 vars = {}
232 style = self.config("web", "style", "paper")
233 style = self.config("web", "style", "paper")
233 if 'style' in req.form:
234 if 'style' in req.form:
234 style = req.form['style'][0]
235 style = req.form['style'][0]
235 vars['style'] = style
236 vars['style'] = style
236
237
237 start = req.url[-1] == '?' and '&' or '?'
238 start = req.url[-1] == '?' and '&' or '?'
238 sessionvars = webutil.sessionvars(vars, start)
239 sessionvars = webutil.sessionvars(vars, start)
239 mapfile = templater.stylemap(style, self.templatepath)
240 mapfile = templater.stylemap(style, self.templatepath)
240
241
241 if not self.reponame:
242 if not self.reponame:
242 self.reponame = (self.config("web", "name")
243 self.reponame = (self.config("web", "name")
243 or req.env.get('REPO_NAME')
244 or req.env.get('REPO_NAME')
244 or req.url.strip('/') or self.repo.root)
245 or req.url.strip('/') or self.repo.root)
245
246
246 # create the templater
247 # create the templater
247
248
248 tmpl = templater.templater(mapfile,
249 tmpl = templater.templater(mapfile,
249 defaults={"url": req.url,
250 defaults={"url": req.url,
250 "staticurl": staticurl,
251 "staticurl": staticurl,
251 "urlbase": urlbase,
252 "urlbase": urlbase,
252 "repo": self.reponame,
253 "repo": self.reponame,
253 "header": header,
254 "header": header,
254 "footer": footer,
255 "footer": footer,
255 "motd": motd,
256 "motd": motd,
256 "sessionvars": sessionvars
257 "sessionvars": sessionvars
257 })
258 })
258 return tmpl
259 return tmpl
259
260
260 def archivelist(self, nodeid):
261 def archivelist(self, nodeid):
261 allowed = self.configlist("web", "allow_archive")
262 allowed = self.configlist("web", "allow_archive")
262 for i, spec in self.archive_specs.iteritems():
263 for i, spec in self.archive_specs.iteritems():
263 if i in allowed or self.configbool("web", "allow" + i):
264 if i in allowed or self.configbool("web", "allow" + i):
264 yield {"type" : i, "extension" : spec[2], "node" : nodeid}
265 yield {"type" : i, "extension" : spec[2], "node" : nodeid}
265
266
266 archive_specs = {
267 archive_specs = {
267 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
268 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
268 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
269 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
269 'zip': ('application/zip', 'zip', '.zip', None),
270 'zip': ('application/zip', 'zip', '.zip', None),
270 }
271 }
271
272
272 def check_perm(self, req, op):
273 def check_perm(self, req, op):
273 '''Check permission for operation based on request data (including
274 '''Check permission for operation based on request data (including
274 authentication info). Return if op allowed, else raise an ErrorResponse
275 authentication info). Return if op allowed, else raise an ErrorResponse
275 exception.'''
276 exception.'''
276
277
277 user = req.env.get('REMOTE_USER')
278 user = req.env.get('REMOTE_USER')
278
279
279 deny_read = self.configlist('web', 'deny_read')
280 deny_read = self.configlist('web', 'deny_read')
280 if deny_read and (not user or deny_read == ['*'] or user in deny_read):
281 if deny_read and (not user or deny_read == ['*'] or user in deny_read):
281 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
282 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
282
283
283 allow_read = self.configlist('web', 'allow_read')
284 allow_read = self.configlist('web', 'allow_read')
284 result = (not allow_read) or (allow_read == ['*'])
285 result = (not allow_read) or (allow_read == ['*'])
285 if not (result or user in allow_read):
286 if not (result or user in allow_read):
286 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
287 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
287
288
288 if op == 'pull' and not self.allowpull:
289 if op == 'pull' and not self.allowpull:
289 raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
290 raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
290 elif op == 'pull' or op is None: # op is None for interface requests
291 elif op == 'pull' or op is None: # op is None for interface requests
291 return
292 return
292
293
293 # enforce that you can only push using POST requests
294 # enforce that you can only push using POST requests
294 if req.env['REQUEST_METHOD'] != 'POST':
295 if req.env['REQUEST_METHOD'] != 'POST':
295 msg = 'push requires POST request'
296 msg = 'push requires POST request'
296 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
297 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
297
298
298 # require ssl by default for pushing, auth info cannot be sniffed
299 # require ssl by default for pushing, auth info cannot be sniffed
299 # and replayed
300 # and replayed
300 scheme = req.env.get('wsgi.url_scheme')
301 scheme = req.env.get('wsgi.url_scheme')
301 if self.configbool('web', 'push_ssl', True) and scheme != 'https':
302 if self.configbool('web', 'push_ssl', True) and scheme != 'https':
302 raise ErrorResponse(HTTP_OK, 'ssl required')
303 raise ErrorResponse(HTTP_OK, 'ssl required')
303
304
304 deny = self.configlist('web', 'deny_push')
305 deny = self.configlist('web', 'deny_push')
305 if deny and (not user or deny == ['*'] or user in deny):
306 if deny and (not user or deny == ['*'] or user in deny):
306 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
307 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
307
308
308 allow = self.configlist('web', 'allow_push')
309 allow = self.configlist('web', 'allow_push')
309 result = allow and (allow == ['*'] or user in allow)
310 result = allow and (allow == ['*'] or user in allow)
310 if not result:
311 if not result:
311 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
312 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
@@ -1,328 +1,329 @@
1 # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
1 # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2, incorporated herein by reference.
7 # GNU General Public License version 2, incorporated herein by reference.
8
8
9 import os, time
9 import os, time
10 from mercurial.i18n import _
10 from mercurial.i18n import _
11 from mercurial import ui, hg, util, templater
11 from mercurial import ui, hg, util, templater
12 from mercurial import error, encoding
12 from mercurial import error, encoding
13 from common import ErrorResponse, get_mtime, staticfile, paritygen,\
13 from common import ErrorResponse, get_mtime, staticfile, paritygen,\
14 get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
14 get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
15 from hgweb_mod import hgweb
15 from hgweb_mod import hgweb
16 from request import wsgirequest
16 from request import wsgirequest
17 import webutil
17 import webutil
18
18
19 def cleannames(items):
19 def cleannames(items):
20 return [(util.pconvert(name).strip('/'), path) for name, path in items]
20 return [(util.pconvert(name).strip('/'), path) for name, path in items]
21
21
22 def findrepos(paths):
22 def findrepos(paths):
23 repos = {}
23 repos = {}
24 for prefix, root in cleannames(paths):
24 for prefix, root in cleannames(paths):
25 roothead, roottail = os.path.split(root)
25 roothead, roottail = os.path.split(root)
26 # "foo = /bar/*" makes every subrepo of /bar/ to be
26 # "foo = /bar/*" makes every subrepo of /bar/ to be
27 # mounted as foo/subrepo
27 # mounted as foo/subrepo
28 # and "foo = /bar/**" also recurses into the subdirectories,
28 # and "foo = /bar/**" also recurses into the subdirectories,
29 # remember to use it without working dir.
29 # remember to use it without working dir.
30 try:
30 try:
31 recurse = {'*': False, '**': True}[roottail]
31 recurse = {'*': False, '**': True}[roottail]
32 except KeyError:
32 except KeyError:
33 repos[prefix] = root
33 repos[prefix] = root
34 continue
34 continue
35 roothead = os.path.normpath(roothead)
35 roothead = os.path.normpath(roothead)
36 for path in util.walkrepos(roothead, followsym=True, recurse=recurse):
36 for path in util.walkrepos(roothead, followsym=True, recurse=recurse):
37 path = os.path.normpath(path)
37 path = os.path.normpath(path)
38 name = util.pconvert(path[len(roothead):]).strip('/')
38 name = util.pconvert(path[len(roothead):]).strip('/')
39 if prefix:
39 if prefix:
40 name = prefix + '/' + name
40 name = prefix + '/' + name
41 repos[name] = path
41 repos[name] = path
42 return repos.items()
42 return repos.items()
43
43
44 class hgwebdir(object):
44 class hgwebdir(object):
45 refreshinterval = 20
45 refreshinterval = 20
46
46
47 def __init__(self, conf, baseui=None):
47 def __init__(self, conf, baseui=None):
48 self.conf = conf
48 self.conf = conf
49 self.baseui = baseui
49 self.baseui = baseui
50 self.lastrefresh = 0
50 self.lastrefresh = 0
51 self.refresh()
51 self.refresh()
52
52
53 def refresh(self):
53 def refresh(self):
54 if self.lastrefresh + self.refreshinterval > time.time():
54 if self.lastrefresh + self.refreshinterval > time.time():
55 return
55 return
56
56
57 if self.baseui:
57 if self.baseui:
58 self.ui = self.baseui.copy()
58 self.ui = self.baseui.copy()
59 else:
59 else:
60 self.ui = ui.ui()
60 self.ui = ui.ui()
61 self.ui.setconfig('ui', 'report_untrusted', 'off')
61 self.ui.setconfig('ui', 'report_untrusted', 'off')
62 self.ui.setconfig('ui', 'interactive', 'off')
62 self.ui.setconfig('ui', 'interactive', 'off')
63
63
64 if not isinstance(self.conf, (dict, list, tuple)):
64 if not isinstance(self.conf, (dict, list, tuple)):
65 map = {'paths': 'hgweb-paths'}
65 map = {'paths': 'hgweb-paths'}
66 self.ui.readconfig(self.conf, remap=map, trust=True)
66 self.ui.readconfig(self.conf, remap=map, trust=True)
67 paths = self.ui.configitems('hgweb-paths')
67 paths = self.ui.configitems('hgweb-paths')
68 elif isinstance(self.conf, (list, tuple)):
68 elif isinstance(self.conf, (list, tuple)):
69 paths = self.conf
69 paths = self.conf
70 elif isinstance(self.conf, dict):
70 elif isinstance(self.conf, dict):
71 paths = self.conf.items()
71 paths = self.conf.items()
72
72
73 self.motd = self.ui.config('web', 'motd')
73 self.motd = self.ui.config('web', 'motd')
74 self.style = self.ui.config('web', 'style', 'paper')
74 self.style = self.ui.config('web', 'style', 'paper')
75 self.stripecount = self.ui.config('web', 'stripes', 1)
75 self.stripecount = self.ui.config('web', 'stripes', 1)
76 if self.stripecount:
76 if self.stripecount:
77 self.stripecount = int(self.stripecount)
77 self.stripecount = int(self.stripecount)
78 self._baseurl = self.ui.config('web', 'baseurl')
78 self._baseurl = self.ui.config('web', 'baseurl')
79
79
80 self.repos = findrepos(paths)
80 self.repos = findrepos(paths)
81 for prefix, root in self.ui.configitems('collections'):
81 for prefix, root in self.ui.configitems('collections'):
82 prefix = util.pconvert(prefix)
82 prefix = util.pconvert(prefix)
83 for path in util.walkrepos(root, followsym=True):
83 for path in util.walkrepos(root, followsym=True):
84 repo = os.path.normpath(path)
84 repo = os.path.normpath(path)
85 name = util.pconvert(repo)
85 name = util.pconvert(repo)
86 if name.startswith(prefix):
86 if name.startswith(prefix):
87 name = name[len(prefix):]
87 name = name[len(prefix):]
88 self.repos.append((name.lstrip('/'), repo))
88 self.repos.append((name.lstrip('/'), repo))
89
89
90 self.repos.sort()
90 self.repos.sort()
91 self.lastrefresh = time.time()
91 self.lastrefresh = time.time()
92
92
93 def run(self):
93 def run(self):
94 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
94 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
95 raise RuntimeError("This function is only intended to be called while running as a CGI script.")
95 raise RuntimeError("This function is only intended to be "
96 "called while running as a CGI script.")
96 import mercurial.hgweb.wsgicgi as wsgicgi
97 import mercurial.hgweb.wsgicgi as wsgicgi
97 wsgicgi.launch(self)
98 wsgicgi.launch(self)
98
99
99 def __call__(self, env, respond):
100 def __call__(self, env, respond):
100 req = wsgirequest(env, respond)
101 req = wsgirequest(env, respond)
101 return self.run_wsgi(req)
102 return self.run_wsgi(req)
102
103
103 def read_allowed(self, ui, req):
104 def read_allowed(self, ui, req):
104 """Check allow_read and deny_read config options of a repo's ui object
105 """Check allow_read and deny_read config options of a repo's ui object
105 to determine user permissions. By default, with neither option set (or
106 to determine user permissions. By default, with neither option set (or
106 both empty), allow all users to read the repo. There are two ways a
107 both empty), allow all users to read the repo. There are two ways a
107 user can be denied read access: (1) deny_read is not empty, and the
108 user can be denied read access: (1) deny_read is not empty, and the
108 user is unauthenticated or deny_read contains user (or *), and (2)
109 user is unauthenticated or deny_read contains user (or *), and (2)
109 allow_read is not empty and the user is not in allow_read. Return True
110 allow_read is not empty and the user is not in allow_read. Return True
110 if user is allowed to read the repo, else return False."""
111 if user is allowed to read the repo, else return False."""
111
112
112 user = req.env.get('REMOTE_USER')
113 user = req.env.get('REMOTE_USER')
113
114
114 deny_read = ui.configlist('web', 'deny_read', untrusted=True)
115 deny_read = ui.configlist('web', 'deny_read', untrusted=True)
115 if deny_read and (not user or deny_read == ['*'] or user in deny_read):
116 if deny_read and (not user or deny_read == ['*'] or user in deny_read):
116 return False
117 return False
117
118
118 allow_read = ui.configlist('web', 'allow_read', untrusted=True)
119 allow_read = ui.configlist('web', 'allow_read', untrusted=True)
119 # by default, allow reading if no allow_read option has been set
120 # by default, allow reading if no allow_read option has been set
120 if (not allow_read) or (allow_read == ['*']) or (user in allow_read):
121 if (not allow_read) or (allow_read == ['*']) or (user in allow_read):
121 return True
122 return True
122
123
123 return False
124 return False
124
125
125 def run_wsgi(self, req):
126 def run_wsgi(self, req):
126 try:
127 try:
127 try:
128 try:
128 self.refresh()
129 self.refresh()
129
130
130 virtual = req.env.get("PATH_INFO", "").strip('/')
131 virtual = req.env.get("PATH_INFO", "").strip('/')
131 tmpl = self.templater(req)
132 tmpl = self.templater(req)
132 ctype = tmpl('mimetype', encoding=encoding.encoding)
133 ctype = tmpl('mimetype', encoding=encoding.encoding)
133 ctype = templater.stringify(ctype)
134 ctype = templater.stringify(ctype)
134
135
135 # a static file
136 # a static file
136 if virtual.startswith('static/') or 'static' in req.form:
137 if virtual.startswith('static/') or 'static' in req.form:
137 if virtual.startswith('static/'):
138 if virtual.startswith('static/'):
138 fname = virtual[7:]
139 fname = virtual[7:]
139 else:
140 else:
140 fname = req.form['static'][0]
141 fname = req.form['static'][0]
141 static = templater.templatepath('static')
142 static = templater.templatepath('static')
142 return (staticfile(static, fname, req),)
143 return (staticfile(static, fname, req),)
143
144
144 # top-level index
145 # top-level index
145 elif not virtual:
146 elif not virtual:
146 req.respond(HTTP_OK, ctype)
147 req.respond(HTTP_OK, ctype)
147 return self.makeindex(req, tmpl)
148 return self.makeindex(req, tmpl)
148
149
149 # nested indexes and hgwebs
150 # nested indexes and hgwebs
150
151
151 repos = dict(self.repos)
152 repos = dict(self.repos)
152 while virtual:
153 while virtual:
153 real = repos.get(virtual)
154 real = repos.get(virtual)
154 if real:
155 if real:
155 req.env['REPO_NAME'] = virtual
156 req.env['REPO_NAME'] = virtual
156 try:
157 try:
157 repo = hg.repository(self.ui, real)
158 repo = hg.repository(self.ui, real)
158 return hgweb(repo).run_wsgi(req)
159 return hgweb(repo).run_wsgi(req)
159 except IOError, inst:
160 except IOError, inst:
160 msg = inst.strerror
161 msg = inst.strerror
161 raise ErrorResponse(HTTP_SERVER_ERROR, msg)
162 raise ErrorResponse(HTTP_SERVER_ERROR, msg)
162 except error.RepoError, inst:
163 except error.RepoError, inst:
163 raise ErrorResponse(HTTP_SERVER_ERROR, str(inst))
164 raise ErrorResponse(HTTP_SERVER_ERROR, str(inst))
164
165
165 # browse subdirectories
166 # browse subdirectories
166 subdir = virtual + '/'
167 subdir = virtual + '/'
167 if [r for r in repos if r.startswith(subdir)]:
168 if [r for r in repos if r.startswith(subdir)]:
168 req.respond(HTTP_OK, ctype)
169 req.respond(HTTP_OK, ctype)
169 return self.makeindex(req, tmpl, subdir)
170 return self.makeindex(req, tmpl, subdir)
170
171
171 up = virtual.rfind('/')
172 up = virtual.rfind('/')
172 if up < 0:
173 if up < 0:
173 break
174 break
174 virtual = virtual[:up]
175 virtual = virtual[:up]
175
176
176 # prefixes not found
177 # prefixes not found
177 req.respond(HTTP_NOT_FOUND, ctype)
178 req.respond(HTTP_NOT_FOUND, ctype)
178 return tmpl("notfound", repo=virtual)
179 return tmpl("notfound", repo=virtual)
179
180
180 except ErrorResponse, err:
181 except ErrorResponse, err:
181 req.respond(err, ctype)
182 req.respond(err, ctype)
182 return tmpl('error', error=err.message or '')
183 return tmpl('error', error=err.message or '')
183 finally:
184 finally:
184 tmpl = None
185 tmpl = None
185
186
186 def makeindex(self, req, tmpl, subdir=""):
187 def makeindex(self, req, tmpl, subdir=""):
187
188
188 def archivelist(ui, nodeid, url):
189 def archivelist(ui, nodeid, url):
189 allowed = ui.configlist("web", "allow_archive", untrusted=True)
190 allowed = ui.configlist("web", "allow_archive", untrusted=True)
190 for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]:
191 for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]:
191 if i[0] in allowed or ui.configbool("web", "allow" + i[0],
192 if i[0] in allowed or ui.configbool("web", "allow" + i[0],
192 untrusted=True):
193 untrusted=True):
193 yield {"type" : i[0], "extension": i[1],
194 yield {"type" : i[0], "extension": i[1],
194 "node": nodeid, "url": url}
195 "node": nodeid, "url": url}
195
196
196 sortdefault = 'name', False
197 sortdefault = 'name', False
197 def entries(sortcolumn="", descending=False, subdir="", **map):
198 def entries(sortcolumn="", descending=False, subdir="", **map):
198 rows = []
199 rows = []
199 parity = paritygen(self.stripecount)
200 parity = paritygen(self.stripecount)
200 for name, path in self.repos:
201 for name, path in self.repos:
201 if not name.startswith(subdir):
202 if not name.startswith(subdir):
202 continue
203 continue
203 name = name[len(subdir):]
204 name = name[len(subdir):]
204
205
205 u = self.ui.copy()
206 u = self.ui.copy()
206 try:
207 try:
207 u.readconfig(os.path.join(path, '.hg', 'hgrc'))
208 u.readconfig(os.path.join(path, '.hg', 'hgrc'))
208 except Exception, e:
209 except Exception, e:
209 u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
210 u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
210 continue
211 continue
211 def get(section, name, default=None):
212 def get(section, name, default=None):
212 return u.config(section, name, default, untrusted=True)
213 return u.config(section, name, default, untrusted=True)
213
214
214 if u.configbool("web", "hidden", untrusted=True):
215 if u.configbool("web", "hidden", untrusted=True):
215 continue
216 continue
216
217
217 if not self.read_allowed(u, req):
218 if not self.read_allowed(u, req):
218 continue
219 continue
219
220
220 parts = [name]
221 parts = [name]
221 if 'PATH_INFO' in req.env:
222 if 'PATH_INFO' in req.env:
222 parts.insert(0, req.env['PATH_INFO'].rstrip('/'))
223 parts.insert(0, req.env['PATH_INFO'].rstrip('/'))
223 if req.env['SCRIPT_NAME']:
224 if req.env['SCRIPT_NAME']:
224 parts.insert(0, req.env['SCRIPT_NAME'])
225 parts.insert(0, req.env['SCRIPT_NAME'])
225 url = ('/'.join(parts).replace("//", "/")) + '/'
226 url = ('/'.join(parts).replace("//", "/")) + '/'
226
227
227 # update time with local timezone
228 # update time with local timezone
228 try:
229 try:
229 d = (get_mtime(path), util.makedate()[1])
230 d = (get_mtime(path), util.makedate()[1])
230 except OSError:
231 except OSError:
231 continue
232 continue
232
233
233 contact = get_contact(get)
234 contact = get_contact(get)
234 description = get("web", "description", "")
235 description = get("web", "description", "")
235 name = get("web", "name", name)
236 name = get("web", "name", name)
236 row = dict(contact=contact or "unknown",
237 row = dict(contact=contact or "unknown",
237 contact_sort=contact.upper() or "unknown",
238 contact_sort=contact.upper() or "unknown",
238 name=name,
239 name=name,
239 name_sort=name,
240 name_sort=name,
240 url=url,
241 url=url,
241 description=description or "unknown",
242 description=description or "unknown",
242 description_sort=description.upper() or "unknown",
243 description_sort=description.upper() or "unknown",
243 lastchange=d,
244 lastchange=d,
244 lastchange_sort=d[1]-d[0],
245 lastchange_sort=d[1]-d[0],
245 archives=archivelist(u, "tip", url))
246 archives=archivelist(u, "tip", url))
246 if (not sortcolumn or (sortcolumn, descending) == sortdefault):
247 if (not sortcolumn or (sortcolumn, descending) == sortdefault):
247 # fast path for unsorted output
248 # fast path for unsorted output
248 row['parity'] = parity.next()
249 row['parity'] = parity.next()
249 yield row
250 yield row
250 else:
251 else:
251 rows.append((row["%s_sort" % sortcolumn], row))
252 rows.append((row["%s_sort" % sortcolumn], row))
252 if rows:
253 if rows:
253 rows.sort()
254 rows.sort()
254 if descending:
255 if descending:
255 rows.reverse()
256 rows.reverse()
256 for key, row in rows:
257 for key, row in rows:
257 row['parity'] = parity.next()
258 row['parity'] = parity.next()
258 yield row
259 yield row
259
260
260 self.refresh()
261 self.refresh()
261 sortable = ["name", "description", "contact", "lastchange"]
262 sortable = ["name", "description", "contact", "lastchange"]
262 sortcolumn, descending = sortdefault
263 sortcolumn, descending = sortdefault
263 if 'sort' in req.form:
264 if 'sort' in req.form:
264 sortcolumn = req.form['sort'][0]
265 sortcolumn = req.form['sort'][0]
265 descending = sortcolumn.startswith('-')
266 descending = sortcolumn.startswith('-')
266 if descending:
267 if descending:
267 sortcolumn = sortcolumn[1:]
268 sortcolumn = sortcolumn[1:]
268 if sortcolumn not in sortable:
269 if sortcolumn not in sortable:
269 sortcolumn = ""
270 sortcolumn = ""
270
271
271 sort = [("sort_%s" % column,
272 sort = [("sort_%s" % column,
272 "%s%s" % ((not descending and column == sortcolumn)
273 "%s%s" % ((not descending and column == sortcolumn)
273 and "-" or "", column))
274 and "-" or "", column))
274 for column in sortable]
275 for column in sortable]
275
276
276 self.refresh()
277 self.refresh()
277 if self._baseurl is not None:
278 if self._baseurl is not None:
278 req.env['SCRIPT_NAME'] = self._baseurl
279 req.env['SCRIPT_NAME'] = self._baseurl
279
280
280 return tmpl("index", entries=entries, subdir=subdir,
281 return tmpl("index", entries=entries, subdir=subdir,
281 sortcolumn=sortcolumn, descending=descending,
282 sortcolumn=sortcolumn, descending=descending,
282 **dict(sort))
283 **dict(sort))
283
284
284 def templater(self, req):
285 def templater(self, req):
285
286
286 def header(**map):
287 def header(**map):
287 yield tmpl('header', encoding=encoding.encoding, **map)
288 yield tmpl('header', encoding=encoding.encoding, **map)
288
289
289 def footer(**map):
290 def footer(**map):
290 yield tmpl("footer", **map)
291 yield tmpl("footer", **map)
291
292
292 def motd(**map):
293 def motd(**map):
293 if self.motd is not None:
294 if self.motd is not None:
294 yield self.motd
295 yield self.motd
295 else:
296 else:
296 yield config('web', 'motd', '')
297 yield config('web', 'motd', '')
297
298
298 def config(section, name, default=None, untrusted=True):
299 def config(section, name, default=None, untrusted=True):
299 return self.ui.config(section, name, default, untrusted)
300 return self.ui.config(section, name, default, untrusted)
300
301
301 if self._baseurl is not None:
302 if self._baseurl is not None:
302 req.env['SCRIPT_NAME'] = self._baseurl
303 req.env['SCRIPT_NAME'] = self._baseurl
303
304
304 url = req.env.get('SCRIPT_NAME', '')
305 url = req.env.get('SCRIPT_NAME', '')
305 if not url.endswith('/'):
306 if not url.endswith('/'):
306 url += '/'
307 url += '/'
307
308
308 vars = {}
309 vars = {}
309 style = self.style
310 style = self.style
310 if 'style' in req.form:
311 if 'style' in req.form:
311 vars['style'] = style = req.form['style'][0]
312 vars['style'] = style = req.form['style'][0]
312 start = url[-1] == '?' and '&' or '?'
313 start = url[-1] == '?' and '&' or '?'
313 sessionvars = webutil.sessionvars(vars, start)
314 sessionvars = webutil.sessionvars(vars, start)
314
315
315 staticurl = config('web', 'staticurl') or url + 'static/'
316 staticurl = config('web', 'staticurl') or url + 'static/'
316 if not staticurl.endswith('/'):
317 if not staticurl.endswith('/'):
317 staticurl += '/'
318 staticurl += '/'
318
319
319 style = 'style' in req.form and req.form['style'][0] or self.style
320 style = 'style' in req.form and req.form['style'][0] or self.style
320 mapfile = templater.stylemap(style)
321 mapfile = templater.stylemap(style)
321 tmpl = templater.templater(mapfile,
322 tmpl = templater.templater(mapfile,
322 defaults={"header": header,
323 defaults={"header": header,
323 "footer": footer,
324 "footer": footer,
324 "motd": motd,
325 "motd": motd,
325 "url": url,
326 "url": url,
326 "staticurl": staticurl,
327 "staticurl": staticurl,
327 "sessionvars": sessionvars})
328 "sessionvars": sessionvars})
328 return tmpl
329 return tmpl
@@ -1,296 +1,298 @@
1 # hgweb/server.py - The standalone hg web server.
1 # hgweb/server.py - The standalone hg web server.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2, incorporated herein by reference.
7 # GNU General Public License version 2, incorporated herein by reference.
8
8
9 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
9 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
10 from mercurial import hg, util, error
10 from mercurial import hg, util, error
11 from hgweb_mod import hgweb
11 from hgweb_mod import hgweb
12 from hgwebdir_mod import hgwebdir
12 from hgwebdir_mod import hgwebdir
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14
14
15 def _splitURI(uri):
15 def _splitURI(uri):
16 """ Return path and query splited from uri
16 """ Return path and query splited from uri
17
17
18 Just like CGI environment, the path is unquoted, the query is
18 Just like CGI environment, the path is unquoted, the query is
19 not.
19 not.
20 """
20 """
21 if '?' in uri:
21 if '?' in uri:
22 path, query = uri.split('?', 1)
22 path, query = uri.split('?', 1)
23 else:
23 else:
24 path, query = uri, ''
24 path, query = uri, ''
25 return urllib.unquote(path), query
25 return urllib.unquote(path), query
26
26
27 class _error_logger(object):
27 class _error_logger(object):
28 def __init__(self, handler):
28 def __init__(self, handler):
29 self.handler = handler
29 self.handler = handler
30 def flush(self):
30 def flush(self):
31 pass
31 pass
32 def write(self, str):
32 def write(self, str):
33 self.writelines(str.split('\n'))
33 self.writelines(str.split('\n'))
34 def writelines(self, seq):
34 def writelines(self, seq):
35 for msg in seq:
35 for msg in seq:
36 self.handler.log_error("HG error: %s", msg)
36 self.handler.log_error("HG error: %s", msg)
37
37
38 class _hgwebhandler(object, BaseHTTPServer.BaseHTTPRequestHandler):
38 class _hgwebhandler(object, BaseHTTPServer.BaseHTTPRequestHandler):
39
39
40 url_scheme = 'http'
40 url_scheme = 'http'
41
41
42 def __init__(self, *args, **kargs):
42 def __init__(self, *args, **kargs):
43 self.protocol_version = 'HTTP/1.1'
43 self.protocol_version = 'HTTP/1.1'
44 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
44 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
45
45
46 def _log_any(self, fp, format, *args):
46 def _log_any(self, fp, format, *args):
47 fp.write("%s - - [%s] %s\n" % (self.client_address[0],
47 fp.write("%s - - [%s] %s\n" % (self.client_address[0],
48 self.log_date_time_string(),
48 self.log_date_time_string(),
49 format % args))
49 format % args))
50 fp.flush()
50 fp.flush()
51
51
52 def log_error(self, format, *args):
52 def log_error(self, format, *args):
53 self._log_any(self.server.errorlog, format, *args)
53 self._log_any(self.server.errorlog, format, *args)
54
54
55 def log_message(self, format, *args):
55 def log_message(self, format, *args):
56 self._log_any(self.server.accesslog, format, *args)
56 self._log_any(self.server.accesslog, format, *args)
57
57
58 def do_write(self):
58 def do_write(self):
59 try:
59 try:
60 self.do_hgweb()
60 self.do_hgweb()
61 except socket.error, inst:
61 except socket.error, inst:
62 if inst[0] != errno.EPIPE:
62 if inst[0] != errno.EPIPE:
63 raise
63 raise
64
64
65 def do_POST(self):
65 def do_POST(self):
66 try:
66 try:
67 self.do_write()
67 self.do_write()
68 except StandardError:
68 except StandardError:
69 self._start_response("500 Internal Server Error", [])
69 self._start_response("500 Internal Server Error", [])
70 self._write("Internal Server Error")
70 self._write("Internal Server Error")
71 tb = "".join(traceback.format_exception(*sys.exc_info()))
71 tb = "".join(traceback.format_exception(*sys.exc_info()))
72 self.log_error("Exception happened during processing request '%s':\n%s",
72 self.log_error("Exception happened during processing "
73 self.path, tb)
73 "request '%s':\n%s", self.path, tb)
74
74
75 def do_GET(self):
75 def do_GET(self):
76 self.do_POST()
76 self.do_POST()
77
77
78 def do_hgweb(self):
78 def do_hgweb(self):
79 path, query = _splitURI(self.path)
79 path, query = _splitURI(self.path)
80
80
81 env = {}
81 env = {}
82 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
82 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
83 env['REQUEST_METHOD'] = self.command
83 env['REQUEST_METHOD'] = self.command
84 env['SERVER_NAME'] = self.server.server_name
84 env['SERVER_NAME'] = self.server.server_name
85 env['SERVER_PORT'] = str(self.server.server_port)
85 env['SERVER_PORT'] = str(self.server.server_port)
86 env['REQUEST_URI'] = self.path
86 env['REQUEST_URI'] = self.path
87 env['SCRIPT_NAME'] = self.server.prefix
87 env['SCRIPT_NAME'] = self.server.prefix
88 env['PATH_INFO'] = path[len(self.server.prefix):]
88 env['PATH_INFO'] = path[len(self.server.prefix):]
89 env['REMOTE_HOST'] = self.client_address[0]
89 env['REMOTE_HOST'] = self.client_address[0]
90 env['REMOTE_ADDR'] = self.client_address[0]
90 env['REMOTE_ADDR'] = self.client_address[0]
91 if query:
91 if query:
92 env['QUERY_STRING'] = query
92 env['QUERY_STRING'] = query
93
93
94 if self.headers.typeheader is None:
94 if self.headers.typeheader is None:
95 env['CONTENT_TYPE'] = self.headers.type
95 env['CONTENT_TYPE'] = self.headers.type
96 else:
96 else:
97 env['CONTENT_TYPE'] = self.headers.typeheader
97 env['CONTENT_TYPE'] = self.headers.typeheader
98 length = self.headers.getheader('content-length')
98 length = self.headers.getheader('content-length')
99 if length:
99 if length:
100 env['CONTENT_LENGTH'] = length
100 env['CONTENT_LENGTH'] = length
101 for header in [h for h in self.headers.keys()
101 for header in [h for h in self.headers.keys()
102 if h not in ('content-type', 'content-length')]:
102 if h not in ('content-type', 'content-length')]:
103 hkey = 'HTTP_' + header.replace('-', '_').upper()
103 hkey = 'HTTP_' + header.replace('-', '_').upper()
104 hval = self.headers.getheader(header)
104 hval = self.headers.getheader(header)
105 hval = hval.replace('\n', '').strip()
105 hval = hval.replace('\n', '').strip()
106 if hval:
106 if hval:
107 env[hkey] = hval
107 env[hkey] = hval
108 env['SERVER_PROTOCOL'] = self.request_version
108 env['SERVER_PROTOCOL'] = self.request_version
109 env['wsgi.version'] = (1, 0)
109 env['wsgi.version'] = (1, 0)
110 env['wsgi.url_scheme'] = self.url_scheme
110 env['wsgi.url_scheme'] = self.url_scheme
111 env['wsgi.input'] = self.rfile
111 env['wsgi.input'] = self.rfile
112 env['wsgi.errors'] = _error_logger(self)
112 env['wsgi.errors'] = _error_logger(self)
113 env['wsgi.multithread'] = isinstance(self.server,
113 env['wsgi.multithread'] = isinstance(self.server,
114 SocketServer.ThreadingMixIn)
114 SocketServer.ThreadingMixIn)
115 env['wsgi.multiprocess'] = isinstance(self.server,
115 env['wsgi.multiprocess'] = isinstance(self.server,
116 SocketServer.ForkingMixIn)
116 SocketServer.ForkingMixIn)
117 env['wsgi.run_once'] = 0
117 env['wsgi.run_once'] = 0
118
118
119 self.close_connection = True
119 self.close_connection = True
120 self.saved_status = None
120 self.saved_status = None
121 self.saved_headers = []
121 self.saved_headers = []
122 self.sent_headers = False
122 self.sent_headers = False
123 self.length = None
123 self.length = None
124 for chunk in self.server.application(env, self._start_response):
124 for chunk in self.server.application(env, self._start_response):
125 self._write(chunk)
125 self._write(chunk)
126
126
127 def send_headers(self):
127 def send_headers(self):
128 if not self.saved_status:
128 if not self.saved_status:
129 raise AssertionError("Sending headers before start_response() called")
129 raise AssertionError("Sending headers before "
130 "start_response() called")
130 saved_status = self.saved_status.split(None, 1)
131 saved_status = self.saved_status.split(None, 1)
131 saved_status[0] = int(saved_status[0])
132 saved_status[0] = int(saved_status[0])
132 self.send_response(*saved_status)
133 self.send_response(*saved_status)
133 should_close = True
134 should_close = True
134 for h in self.saved_headers:
135 for h in self.saved_headers:
135 self.send_header(*h)
136 self.send_header(*h)
136 if h[0].lower() == 'content-length':
137 if h[0].lower() == 'content-length':
137 should_close = False
138 should_close = False
138 self.length = int(h[1])
139 self.length = int(h[1])
139 # The value of the Connection header is a list of case-insensitive
140 # The value of the Connection header is a list of case-insensitive
140 # tokens separated by commas and optional whitespace.
141 # tokens separated by commas and optional whitespace.
141 if 'close' in [token.strip().lower() for token in
142 if 'close' in [token.strip().lower() for token in
142 self.headers.get('connection', '').split(',')]:
143 self.headers.get('connection', '').split(',')]:
143 should_close = True
144 should_close = True
144 if should_close:
145 if should_close:
145 self.send_header('Connection', 'close')
146 self.send_header('Connection', 'close')
146 self.close_connection = should_close
147 self.close_connection = should_close
147 self.end_headers()
148 self.end_headers()
148 self.sent_headers = True
149 self.sent_headers = True
149
150
150 def _start_response(self, http_status, headers, exc_info=None):
151 def _start_response(self, http_status, headers, exc_info=None):
151 code, msg = http_status.split(None, 1)
152 code, msg = http_status.split(None, 1)
152 code = int(code)
153 code = int(code)
153 self.saved_status = http_status
154 self.saved_status = http_status
154 bad_headers = ('connection', 'transfer-encoding')
155 bad_headers = ('connection', 'transfer-encoding')
155 self.saved_headers = [h for h in headers
156 self.saved_headers = [h for h in headers
156 if h[0].lower() not in bad_headers]
157 if h[0].lower() not in bad_headers]
157 return self._write
158 return self._write
158
159
159 def _write(self, data):
160 def _write(self, data):
160 if not self.saved_status:
161 if not self.saved_status:
161 raise AssertionError("data written before start_response() called")
162 raise AssertionError("data written before start_response() called")
162 elif not self.sent_headers:
163 elif not self.sent_headers:
163 self.send_headers()
164 self.send_headers()
164 if self.length is not None:
165 if self.length is not None:
165 if len(data) > self.length:
166 if len(data) > self.length:
166 raise AssertionError("Content-length header sent, but more bytes than specified are being written.")
167 raise AssertionError("Content-length header sent, but more "
168 "bytes than specified are being written.")
167 self.length = self.length - len(data)
169 self.length = self.length - len(data)
168 self.wfile.write(data)
170 self.wfile.write(data)
169 self.wfile.flush()
171 self.wfile.flush()
170
172
171 class _shgwebhandler(_hgwebhandler):
173 class _shgwebhandler(_hgwebhandler):
172
174
173 url_scheme = 'https'
175 url_scheme = 'https'
174
176
175 def setup(self):
177 def setup(self):
176 self.connection = self.request
178 self.connection = self.request
177 self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
179 self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
178 self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
180 self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
179
181
180 def do_write(self):
182 def do_write(self):
181 from OpenSSL.SSL import SysCallError
183 from OpenSSL.SSL import SysCallError
182 try:
184 try:
183 super(_shgwebhandler, self).do_write()
185 super(_shgwebhandler, self).do_write()
184 except SysCallError, inst:
186 except SysCallError, inst:
185 if inst.args[0] != errno.EPIPE:
187 if inst.args[0] != errno.EPIPE:
186 raise
188 raise
187
189
188 def handle_one_request(self):
190 def handle_one_request(self):
189 from OpenSSL.SSL import SysCallError, ZeroReturnError
191 from OpenSSL.SSL import SysCallError, ZeroReturnError
190 try:
192 try:
191 super(_shgwebhandler, self).handle_one_request()
193 super(_shgwebhandler, self).handle_one_request()
192 except (SysCallError, ZeroReturnError):
194 except (SysCallError, ZeroReturnError):
193 self.close_connection = True
195 self.close_connection = True
194 pass
196 pass
195
197
196 def create_server(ui, repo):
198 def create_server(ui, repo):
197 use_threads = True
199 use_threads = True
198
200
199 def openlog(opt, default):
201 def openlog(opt, default):
200 if opt and opt != '-':
202 if opt and opt != '-':
201 return open(opt, 'a')
203 return open(opt, 'a')
202 return default
204 return default
203
205
204 if repo is None:
206 if repo is None:
205 myui = ui
207 myui = ui
206 else:
208 else:
207 myui = repo.ui
209 myui = repo.ui
208 address = myui.config("web", "address", "")
210 address = myui.config("web", "address", "")
209 port = int(myui.config("web", "port", 8000))
211 port = int(myui.config("web", "port", 8000))
210 prefix = myui.config("web", "prefix", "")
212 prefix = myui.config("web", "prefix", "")
211 if prefix:
213 if prefix:
212 prefix = "/" + prefix.strip("/")
214 prefix = "/" + prefix.strip("/")
213 use_ipv6 = myui.configbool("web", "ipv6")
215 use_ipv6 = myui.configbool("web", "ipv6")
214 webdir_conf = myui.config("web", "webdir_conf")
216 webdir_conf = myui.config("web", "webdir_conf")
215 ssl_cert = myui.config("web", "certificate")
217 ssl_cert = myui.config("web", "certificate")
216 accesslog = openlog(myui.config("web", "accesslog", "-"), sys.stdout)
218 accesslog = openlog(myui.config("web", "accesslog", "-"), sys.stdout)
217 errorlog = openlog(myui.config("web", "errorlog", "-"), sys.stderr)
219 errorlog = openlog(myui.config("web", "errorlog", "-"), sys.stderr)
218
220
219 if use_threads:
221 if use_threads:
220 try:
222 try:
221 from threading import activeCount
223 from threading import activeCount
222 except ImportError:
224 except ImportError:
223 use_threads = False
225 use_threads = False
224
226
225 if use_threads:
227 if use_threads:
226 _mixin = SocketServer.ThreadingMixIn
228 _mixin = SocketServer.ThreadingMixIn
227 else:
229 else:
228 if hasattr(os, "fork"):
230 if hasattr(os, "fork"):
229 _mixin = SocketServer.ForkingMixIn
231 _mixin = SocketServer.ForkingMixIn
230 else:
232 else:
231 class _mixin:
233 class _mixin:
232 pass
234 pass
233
235
234 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
236 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
235
237
236 # SO_REUSEADDR has broken semantics on windows
238 # SO_REUSEADDR has broken semantics on windows
237 if os.name == 'nt':
239 if os.name == 'nt':
238 allow_reuse_address = 0
240 allow_reuse_address = 0
239
241
240 def __init__(self, *args, **kargs):
242 def __init__(self, *args, **kargs):
241 BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
243 BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
242 self.accesslog = accesslog
244 self.accesslog = accesslog
243 self.errorlog = errorlog
245 self.errorlog = errorlog
244 self.daemon_threads = True
246 self.daemon_threads = True
245 def make_handler():
247 def make_handler():
246 if webdir_conf:
248 if webdir_conf:
247 hgwebobj = hgwebdir(webdir_conf, ui)
249 hgwebobj = hgwebdir(webdir_conf, ui)
248 elif repo is not None:
250 elif repo is not None:
249 hgwebobj = hgweb(hg.repository(repo.ui, repo.root))
251 hgwebobj = hgweb(hg.repository(repo.ui, repo.root))
250 else:
252 else:
251 raise error.RepoError(_("There is no Mercurial repository"
253 raise error.RepoError(_("There is no Mercurial repository"
252 " here (.hg not found)"))
254 " here (.hg not found)"))
253 return hgwebobj
255 return hgwebobj
254 self.application = make_handler()
256 self.application = make_handler()
255
257
256 if ssl_cert:
258 if ssl_cert:
257 try:
259 try:
258 from OpenSSL import SSL
260 from OpenSSL import SSL
259 ctx = SSL.Context(SSL.SSLv23_METHOD)
261 ctx = SSL.Context(SSL.SSLv23_METHOD)
260 except ImportError:
262 except ImportError:
261 raise util.Abort(_("SSL support is unavailable"))
263 raise util.Abort(_("SSL support is unavailable"))
262 ctx.use_privatekey_file(ssl_cert)
264 ctx.use_privatekey_file(ssl_cert)
263 ctx.use_certificate_file(ssl_cert)
265 ctx.use_certificate_file(ssl_cert)
264 sock = socket.socket(self.address_family, self.socket_type)
266 sock = socket.socket(self.address_family, self.socket_type)
265 self.socket = SSL.Connection(ctx, sock)
267 self.socket = SSL.Connection(ctx, sock)
266 self.server_bind()
268 self.server_bind()
267 self.server_activate()
269 self.server_activate()
268
270
269 self.addr, self.port = self.socket.getsockname()[0:2]
271 self.addr, self.port = self.socket.getsockname()[0:2]
270 self.prefix = prefix
272 self.prefix = prefix
271 self.fqaddr = socket.getfqdn(address)
273 self.fqaddr = socket.getfqdn(address)
272
274
273 class IPv6HTTPServer(MercurialHTTPServer):
275 class IPv6HTTPServer(MercurialHTTPServer):
274 address_family = getattr(socket, 'AF_INET6', None)
276 address_family = getattr(socket, 'AF_INET6', None)
275
277
276 def __init__(self, *args, **kwargs):
278 def __init__(self, *args, **kwargs):
277 if self.address_family is None:
279 if self.address_family is None:
278 raise error.RepoError(_('IPv6 is not available on this system'))
280 raise error.RepoError(_('IPv6 is not available on this system'))
279 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
281 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
280
282
281 if ssl_cert:
283 if ssl_cert:
282 handler = _shgwebhandler
284 handler = _shgwebhandler
283 else:
285 else:
284 handler = _hgwebhandler
286 handler = _hgwebhandler
285
287
286 # ugly hack due to python issue5853 (for threaded use)
288 # ugly hack due to python issue5853 (for threaded use)
287 import mimetypes; mimetypes.init()
289 import mimetypes; mimetypes.init()
288
290
289 try:
291 try:
290 if use_ipv6:
292 if use_ipv6:
291 return IPv6HTTPServer((address, port), handler)
293 return IPv6HTTPServer((address, port), handler)
292 else:
294 else:
293 return MercurialHTTPServer((address, port), handler)
295 return MercurialHTTPServer((address, port), handler)
294 except socket.error, inst:
296 except socket.error, inst:
295 raise util.Abort(_("cannot start server at '%s:%d': %s")
297 raise util.Abort(_("cannot start server at '%s:%d': %s")
296 % (address, port, inst.args[1]))
298 % (address, port, inst.args[1]))
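
A minimal sketch of driving create_server() directly, shown here only for illustration; it assumes the ui and hg modules imported by this server module are available, and the repository path and port values are placeholders, not part of the changeset.

    from mercurial import ui as uimod, hg

    u = uimod.ui()
    u.setconfig('web', 'address', '127.0.0.1')   # read back via myui.config("web", "address", "")
    u.setconfig('web', 'port', '8080')           # placeholder port
    repo = hg.repository(u, '/path/to/repo')     # placeholder path
    httpd = create_server(u, repo)               # MercurialHTTPServer or IPv6HTTPServer
    httpd.serve_forever()
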
@@ -1,2132 +1,2134
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 from lock import release
16 from lock import release
17 import weakref, stat, errno, os, time, inspect
17 import weakref, stat, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class localrepository(repo.repository):
20 class localrepository(repo.repository):
21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 supported = set('revlogv1 store fncache'.split())
22 supported = set('revlogv1 store fncache'.split())
23
23
24 def __init__(self, baseui, path=None, create=0):
24 def __init__(self, baseui, path=None, create=0):
25 repo.repository.__init__(self)
25 repo.repository.__init__(self)
26 self.root = os.path.realpath(path)
26 self.root = os.path.realpath(path)
27 self.path = os.path.join(self.root, ".hg")
27 self.path = os.path.join(self.root, ".hg")
28 self.origroot = path
28 self.origroot = path
29 self.opener = util.opener(self.path)
29 self.opener = util.opener(self.path)
30 self.wopener = util.opener(self.root)
30 self.wopener = util.opener(self.root)
31
31
32 if not os.path.isdir(self.path):
32 if not os.path.isdir(self.path):
33 if create:
33 if create:
34 if not os.path.exists(path):
34 if not os.path.exists(path):
35 os.mkdir(path)
35 os.mkdir(path)
36 os.mkdir(self.path)
36 os.mkdir(self.path)
37 requirements = ["revlogv1"]
37 requirements = ["revlogv1"]
38 if baseui.configbool('format', 'usestore', True):
38 if baseui.configbool('format', 'usestore', True):
39 os.mkdir(os.path.join(self.path, "store"))
39 os.mkdir(os.path.join(self.path, "store"))
40 requirements.append("store")
40 requirements.append("store")
41 if baseui.configbool('format', 'usefncache', True):
41 if baseui.configbool('format', 'usefncache', True):
42 requirements.append("fncache")
42 requirements.append("fncache")
43 # create an invalid changelog
43 # create an invalid changelog
44 self.opener("00changelog.i", "a").write(
44 self.opener("00changelog.i", "a").write(
45 '\0\0\0\2' # represents revlogv2
45 '\0\0\0\2' # represents revlogv2
46 ' dummy changelog to prevent using the old repo layout'
46 ' dummy changelog to prevent using the old repo layout'
47 )
47 )
48 reqfile = self.opener("requires", "w")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
49 for r in requirements:
50 reqfile.write("%s\n" % r)
50 reqfile.write("%s\n" % r)
51 reqfile.close()
51 reqfile.close()
52 else:
52 else:
53 raise error.RepoError(_("repository %s not found") % path)
53 raise error.RepoError(_("repository %s not found") % path)
54 elif create:
54 elif create:
55 raise error.RepoError(_("repository %s already exists") % path)
55 raise error.RepoError(_("repository %s already exists") % path)
56 else:
56 else:
57 # find requirements
57 # find requirements
58 requirements = set()
58 requirements = set()
59 try:
59 try:
60 requirements = set(self.opener("requires").read().splitlines())
60 requirements = set(self.opener("requires").read().splitlines())
61 except IOError, inst:
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64 for r in requirements - self.supported:
64 for r in requirements - self.supported:
65 raise error.RepoError(_("requirement '%s' not supported") % r)
65 raise error.RepoError(_("requirement '%s' not supported") % r)
66
66
67 self.store = store.store(requirements, self.path, util.opener)
67 self.store = store.store(requirements, self.path, util.opener)
68 self.spath = self.store.path
68 self.spath = self.store.path
69 self.sopener = self.store.opener
69 self.sopener = self.store.opener
70 self.sjoin = self.store.join
70 self.sjoin = self.store.join
71 self.opener.createmode = self.store.createmode
71 self.opener.createmode = self.store.createmode
72
72
73 self.baseui = baseui
73 self.baseui = baseui
74 self.ui = baseui.copy()
74 self.ui = baseui.copy()
75 try:
75 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
76 self.ui.readconfig(self.join("hgrc"), self.root)
77 extensions.loadall(self.ui)
77 extensions.loadall(self.ui)
78 except IOError:
78 except IOError:
79 pass
79 pass
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self._tagstypecache = None
82 self._tagstypecache = None
83 self.branchcache = None
83 self.branchcache = None
84 self._ubranchcache = None # UTF-8 version of branchcache
84 self._ubranchcache = None # UTF-8 version of branchcache
85 self._branchcachetip = None
85 self._branchcachetip = None
86 self.nodetagscache = None
86 self.nodetagscache = None
87 self.filterpats = {}
87 self.filterpats = {}
88 self._datafilters = {}
88 self._datafilters = {}
89 self._transref = self._lockref = self._wlockref = None
89 self._transref = self._lockref = self._wlockref = None
90
90
91 @propertycache
91 @propertycache
92 def changelog(self):
92 def changelog(self):
93 c = changelog.changelog(self.sopener)
93 c = changelog.changelog(self.sopener)
94 if 'HG_PENDING' in os.environ:
94 if 'HG_PENDING' in os.environ:
95 p = os.environ['HG_PENDING']
95 p = os.environ['HG_PENDING']
96 if p.startswith(self.root):
96 if p.startswith(self.root):
97 c.readpending('00changelog.i.a')
97 c.readpending('00changelog.i.a')
98 self.sopener.defversion = c.version
98 self.sopener.defversion = c.version
99 return c
99 return c
100
100
101 @propertycache
101 @propertycache
102 def manifest(self):
102 def manifest(self):
103 return manifest.manifest(self.sopener)
103 return manifest.manifest(self.sopener)
104
104
105 @propertycache
105 @propertycache
106 def dirstate(self):
106 def dirstate(self):
107 return dirstate.dirstate(self.opener, self.ui, self.root)
107 return dirstate.dirstate(self.opener, self.ui, self.root)
108
108
109 def __getitem__(self, changeid):
109 def __getitem__(self, changeid):
110 if changeid is None:
110 if changeid is None:
111 return context.workingctx(self)
111 return context.workingctx(self)
112 return context.changectx(self, changeid)
112 return context.changectx(self, changeid)
113
113
114 def __nonzero__(self):
114 def __nonzero__(self):
115 return True
115 return True
116
116
117 def __len__(self):
117 def __len__(self):
118 return len(self.changelog)
118 return len(self.changelog)
119
119
120 def __iter__(self):
120 def __iter__(self):
121 for i in xrange(len(self)):
121 for i in xrange(len(self)):
122 yield i
122 yield i
123
123
124 def url(self):
124 def url(self):
125 return 'file:' + self.root
125 return 'file:' + self.root
126
126
127 def hook(self, name, throw=False, **args):
127 def hook(self, name, throw=False, **args):
128 return hook.hook(self.ui, self, name, throw, **args)
128 return hook.hook(self.ui, self, name, throw, **args)
129
129
130 tag_disallowed = ':\r\n'
130 tag_disallowed = ':\r\n'
131
131
132 def _tag(self, names, node, message, local, user, date, extra={}):
132 def _tag(self, names, node, message, local, user, date, extra={}):
133 if isinstance(names, str):
133 if isinstance(names, str):
134 allchars = names
134 allchars = names
135 names = (names,)
135 names = (names,)
136 else:
136 else:
137 allchars = ''.join(names)
137 allchars = ''.join(names)
138 for c in self.tag_disallowed:
138 for c in self.tag_disallowed:
139 if c in allchars:
139 if c in allchars:
140 raise util.Abort(_('%r cannot be used in a tag name') % c)
140 raise util.Abort(_('%r cannot be used in a tag name') % c)
141
141
142 for name in names:
142 for name in names:
143 self.hook('pretag', throw=True, node=hex(node), tag=name,
143 self.hook('pretag', throw=True, node=hex(node), tag=name,
144 local=local)
144 local=local)
145
145
146 def writetags(fp, names, munge, prevtags):
146 def writetags(fp, names, munge, prevtags):
147 fp.seek(0, 2)
147 fp.seek(0, 2)
148 if prevtags and prevtags[-1] != '\n':
148 if prevtags and prevtags[-1] != '\n':
149 fp.write('\n')
149 fp.write('\n')
150 for name in names:
150 for name in names:
151 m = munge and munge(name) or name
151 m = munge and munge(name) or name
152 if self._tagstypecache and name in self._tagstypecache:
152 if self._tagstypecache and name in self._tagstypecache:
153 old = self.tagscache.get(name, nullid)
153 old = self.tagscache.get(name, nullid)
154 fp.write('%s %s\n' % (hex(old), m))
154 fp.write('%s %s\n' % (hex(old), m))
155 fp.write('%s %s\n' % (hex(node), m))
155 fp.write('%s %s\n' % (hex(node), m))
156 fp.close()
156 fp.close()
157
157
158 prevtags = ''
158 prevtags = ''
159 if local:
159 if local:
160 try:
160 try:
161 fp = self.opener('localtags', 'r+')
161 fp = self.opener('localtags', 'r+')
162 except IOError:
162 except IOError:
163 fp = self.opener('localtags', 'a')
163 fp = self.opener('localtags', 'a')
164 else:
164 else:
165 prevtags = fp.read()
165 prevtags = fp.read()
166
166
167 # local tags are stored in the current charset
167 # local tags are stored in the current charset
168 writetags(fp, names, None, prevtags)
168 writetags(fp, names, None, prevtags)
169 for name in names:
169 for name in names:
170 self.hook('tag', node=hex(node), tag=name, local=local)
170 self.hook('tag', node=hex(node), tag=name, local=local)
171 return
171 return
172
172
173 try:
173 try:
174 fp = self.wfile('.hgtags', 'rb+')
174 fp = self.wfile('.hgtags', 'rb+')
175 except IOError:
175 except IOError:
176 fp = self.wfile('.hgtags', 'ab')
176 fp = self.wfile('.hgtags', 'ab')
177 else:
177 else:
178 prevtags = fp.read()
178 prevtags = fp.read()
179
179
180 # committed tags are stored in UTF-8
180 # committed tags are stored in UTF-8
181 writetags(fp, names, encoding.fromlocal, prevtags)
181 writetags(fp, names, encoding.fromlocal, prevtags)
182
182
183 if '.hgtags' not in self.dirstate:
183 if '.hgtags' not in self.dirstate:
184 self.add(['.hgtags'])
184 self.add(['.hgtags'])
185
185
186 tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)
186 tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)
187
187
188 for name in names:
188 for name in names:
189 self.hook('tag', node=hex(node), tag=name, local=local)
189 self.hook('tag', node=hex(node), tag=name, local=local)
190
190
191 return tagnode
191 return tagnode
192
192
193 def tag(self, names, node, message, local, user, date):
193 def tag(self, names, node, message, local, user, date):
194 '''tag a revision with one or more symbolic names.
194 '''tag a revision with one or more symbolic names.
195
195
196 names is a list of strings or, when adding a single tag, names may be a
196 names is a list of strings or, when adding a single tag, names may be a
197 string.
197 string.
198
198
199 if local is True, the tags are stored in a per-repository file.
199 if local is True, the tags are stored in a per-repository file.
200 otherwise, they are stored in the .hgtags file, and a new
200 otherwise, they are stored in the .hgtags file, and a new
201 changeset is committed with the change.
201 changeset is committed with the change.
202
202
203 keyword arguments:
203 keyword arguments:
204
204
205 local: whether to store tags in non-version-controlled file
205 local: whether to store tags in non-version-controlled file
206 (default False)
206 (default False)
207
207
208 message: commit message to use if committing
208 message: commit message to use if committing
209
209
210 user: name of user to use if committing
210 user: name of user to use if committing
211
211
212 date: date tuple to use if committing'''
212 date: date tuple to use if committing'''
213
213
214 for x in self.status()[:5]:
214 for x in self.status()[:5]:
215 if '.hgtags' in x:
215 if '.hgtags' in x:
216 raise util.Abort(_('working copy of .hgtags is changed '
216 raise util.Abort(_('working copy of .hgtags is changed '
217 '(please commit .hgtags manually)'))
217 '(please commit .hgtags manually)'))
218
218
219 self.tags() # instantiate the cache
219 self.tags() # instantiate the cache
220 self._tag(names, node, message, local, user, date)
220 self._tag(names, node, message, local, user, date)
221
221
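
A hedged usage sketch for tag() as defined above; repo is assumed to be an open localrepository, and the tag name, message and user below are illustrative placeholders.

    node = repo.lookup('tip')
    # local=False writes .hgtags and commits a changeset;
    # local=True only appends to .hg/localtags
    repo.tag('v1.0', node,
             'Added tag v1.0 for changeset %s' % short(node),
             False, 'example user <user@example.com>', None)
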
222 def tags(self):
222 def tags(self):
223 '''return a mapping of tag to node'''
223 '''return a mapping of tag to node'''
224 if self.tagscache:
224 if self.tagscache:
225 return self.tagscache
225 return self.tagscache
226
226
227 globaltags = {}
227 globaltags = {}
228 tagtypes = {}
228 tagtypes = {}
229
229
230 def readtags(lines, fn, tagtype):
230 def readtags(lines, fn, tagtype):
231 filetags = {}
231 filetags = {}
232 count = 0
232 count = 0
233
233
234 def warn(msg):
234 def warn(msg):
235 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
235 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
236
236
237 for l in lines:
237 for l in lines:
238 count += 1
238 count += 1
239 if not l:
239 if not l:
240 continue
240 continue
241 s = l.split(" ", 1)
241 s = l.split(" ", 1)
242 if len(s) != 2:
242 if len(s) != 2:
243 warn(_("cannot parse entry"))
243 warn(_("cannot parse entry"))
244 continue
244 continue
245 node, key = s
245 node, key = s
246 key = encoding.tolocal(key.strip()) # stored in UTF-8
246 key = encoding.tolocal(key.strip()) # stored in UTF-8
247 try:
247 try:
248 bin_n = bin(node)
248 bin_n = bin(node)
249 except TypeError:
249 except TypeError:
250 warn(_("node '%s' is not well formed") % node)
250 warn(_("node '%s' is not well formed") % node)
251 continue
251 continue
252 if bin_n not in self.changelog.nodemap:
252 if bin_n not in self.changelog.nodemap:
253 warn(_("tag '%s' refers to unknown node") % key)
253 warn(_("tag '%s' refers to unknown node") % key)
254 continue
254 continue
255
255
256 h = []
256 h = []
257 if key in filetags:
257 if key in filetags:
258 n, h = filetags[key]
258 n, h = filetags[key]
259 h.append(n)
259 h.append(n)
260 filetags[key] = (bin_n, h)
260 filetags[key] = (bin_n, h)
261
261
262 for k, nh in filetags.iteritems():
262 for k, nh in filetags.iteritems():
263 if k not in globaltags:
263 if k not in globaltags:
264 globaltags[k] = nh
264 globaltags[k] = nh
265 tagtypes[k] = tagtype
265 tagtypes[k] = tagtype
266 continue
266 continue
267
267
268 # we prefer the global tag if:
268 # we prefer the global tag if:
269 # it supersedes us OR
269 # it supersedes us OR
270 # mutual supersedes and it has a higher rank
270 # mutual supersedes and it has a higher rank
271 # otherwise we win because we're tip-most
271 # otherwise we win because we're tip-most
272 an, ah = nh
272 an, ah = nh
273 bn, bh = globaltags[k]
273 bn, bh = globaltags[k]
274 if (bn != an and an in bh and
274 if (bn != an and an in bh and
275 (bn not in ah or len(bh) > len(ah))):
275 (bn not in ah or len(bh) > len(ah))):
276 an = bn
276 an = bn
277 ah.extend([n for n in bh if n not in ah])
277 ah.extend([n for n in bh if n not in ah])
278 globaltags[k] = an, ah
278 globaltags[k] = an, ah
279 tagtypes[k] = tagtype
279 tagtypes[k] = tagtype
280
280
281 # read the tags file from each head, ending with the tip
281 # read the tags file from each head, ending with the tip
282 f = None
282 f = None
283 for rev, node, fnode in self._hgtagsnodes():
283 for rev, node, fnode in self._hgtagsnodes():
284 f = (f and f.filectx(fnode) or
284 f = (f and f.filectx(fnode) or
285 self.filectx('.hgtags', fileid=fnode))
285 self.filectx('.hgtags', fileid=fnode))
286 readtags(f.data().splitlines(), f, "global")
286 readtags(f.data().splitlines(), f, "global")
287
287
288 try:
288 try:
289 data = encoding.fromlocal(self.opener("localtags").read())
289 data = encoding.fromlocal(self.opener("localtags").read())
290 # localtags are stored in the local character set
290 # localtags are stored in the local character set
291 # while the internal tag table is stored in UTF-8
291 # while the internal tag table is stored in UTF-8
292 readtags(data.splitlines(), "localtags", "local")
292 readtags(data.splitlines(), "localtags", "local")
293 except IOError:
293 except IOError:
294 pass
294 pass
295
295
296 self.tagscache = {}
296 self.tagscache = {}
297 self._tagstypecache = {}
297 self._tagstypecache = {}
298 for k, nh in globaltags.iteritems():
298 for k, nh in globaltags.iteritems():
299 n = nh[0]
299 n = nh[0]
300 if n != nullid:
300 if n != nullid:
301 self.tagscache[k] = n
301 self.tagscache[k] = n
302 self._tagstypecache[k] = tagtypes[k]
302 self._tagstypecache[k] = tagtypes[k]
303 self.tagscache['tip'] = self.changelog.tip()
303 self.tagscache['tip'] = self.changelog.tip()
304 return self.tagscache
304 return self.tagscache
305
305
306 def tagtype(self, tagname):
306 def tagtype(self, tagname):
307 '''
307 '''
308 return the type of the given tag. result can be:
308 return the type of the given tag. result can be:
309
309
310 'local' : a local tag
310 'local' : a local tag
311 'global' : a global tag
311 'global' : a global tag
312 None : tag does not exist
312 None : tag does not exist
313 '''
313 '''
314
314
315 self.tags()
315 self.tags()
316
316
317 return self._tagstypecache.get(tagname)
317 return self._tagstypecache.get(tagname)
318
318
319 def _hgtagsnodes(self):
319 def _hgtagsnodes(self):
320 last = {}
320 last = {}
321 ret = []
321 ret = []
322 for node in reversed(self.heads()):
322 for node in reversed(self.heads()):
323 c = self[node]
323 c = self[node]
324 rev = c.rev()
324 rev = c.rev()
325 try:
325 try:
326 fnode = c.filenode('.hgtags')
326 fnode = c.filenode('.hgtags')
327 except error.LookupError:
327 except error.LookupError:
328 continue
328 continue
329 ret.append((rev, node, fnode))
329 ret.append((rev, node, fnode))
330 if fnode in last:
330 if fnode in last:
331 ret[last[fnode]] = None
331 ret[last[fnode]] = None
332 last[fnode] = len(ret) - 1
332 last[fnode] = len(ret) - 1
333 return [item for item in ret if item]
333 return [item for item in ret if item]
334
334
335 def tagslist(self):
335 def tagslist(self):
336 '''return a list of tags ordered by revision'''
336 '''return a list of tags ordered by revision'''
337 l = []
337 l = []
338 for t, n in self.tags().iteritems():
338 for t, n in self.tags().iteritems():
339 try:
339 try:
340 r = self.changelog.rev(n)
340 r = self.changelog.rev(n)
341 except:
341 except:
342 r = -2 # sort to the beginning of the list if unknown
342 r = -2 # sort to the beginning of the list if unknown
343 l.append((r, t, n))
343 l.append((r, t, n))
344 return [(t, n) for r, t, n in sorted(l)]
344 return [(t, n) for r, t, n in sorted(l)]
345
345
346 def nodetags(self, node):
346 def nodetags(self, node):
347 '''return the tags associated with a node'''
347 '''return the tags associated with a node'''
348 if not self.nodetagscache:
348 if not self.nodetagscache:
349 self.nodetagscache = {}
349 self.nodetagscache = {}
350 for t, n in self.tags().iteritems():
350 for t, n in self.tags().iteritems():
351 self.nodetagscache.setdefault(n, []).append(t)
351 self.nodetagscache.setdefault(n, []).append(t)
352 return self.nodetagscache.get(node, [])
352 return self.nodetagscache.get(node, [])
353
353
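
An illustrative, read-only sketch of the tag query helpers defined above (tags(), tagslist(), nodetags(), tagtype()); repo is assumed to be an existing localrepository.

    for name, node in repo.tagslist():          # ordered by revision
        kind = repo.tagtype(name) or 'unknown'  # 'local', 'global' or None
        print '%s %s (%s)' % (hex(node), name, kind)

    tip = repo.changelog.tip()
    print repo.nodetags(tip)                    # always includes 'tip'
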
354 def _branchtags(self, partial, lrev):
354 def _branchtags(self, partial, lrev):
355 # TODO: rename this function?
355 # TODO: rename this function?
356 tiprev = len(self) - 1
356 tiprev = len(self) - 1
357 if lrev != tiprev:
357 if lrev != tiprev:
358 self._updatebranchcache(partial, lrev+1, tiprev+1)
358 self._updatebranchcache(partial, lrev+1, tiprev+1)
359 self._writebranchcache(partial, self.changelog.tip(), tiprev)
359 self._writebranchcache(partial, self.changelog.tip(), tiprev)
360
360
361 return partial
361 return partial
362
362
363 def branchmap(self):
363 def branchmap(self):
364 tip = self.changelog.tip()
364 tip = self.changelog.tip()
365 if self.branchcache is not None and self._branchcachetip == tip:
365 if self.branchcache is not None and self._branchcachetip == tip:
366 return self.branchcache
366 return self.branchcache
367
367
368 oldtip = self._branchcachetip
368 oldtip = self._branchcachetip
369 self._branchcachetip = tip
369 self._branchcachetip = tip
370 if self.branchcache is None:
370 if self.branchcache is None:
371 self.branchcache = {} # avoid recursion in changectx
371 self.branchcache = {} # avoid recursion in changectx
372 else:
372 else:
373 self.branchcache.clear() # keep using the same dict
373 self.branchcache.clear() # keep using the same dict
374 if oldtip is None or oldtip not in self.changelog.nodemap:
374 if oldtip is None or oldtip not in self.changelog.nodemap:
375 partial, last, lrev = self._readbranchcache()
375 partial, last, lrev = self._readbranchcache()
376 else:
376 else:
377 lrev = self.changelog.rev(oldtip)
377 lrev = self.changelog.rev(oldtip)
378 partial = self._ubranchcache
378 partial = self._ubranchcache
379
379
380 self._branchtags(partial, lrev)
380 self._branchtags(partial, lrev)
381 # this private cache holds all heads (not just tips)
381 # this private cache holds all heads (not just tips)
382 self._ubranchcache = partial
382 self._ubranchcache = partial
383
383
384 # the branch cache is stored on disk as UTF-8, but in the local
384 # the branch cache is stored on disk as UTF-8, but in the local
385 # charset internally
385 # charset internally
386 for k, v in partial.iteritems():
386 for k, v in partial.iteritems():
387 self.branchcache[encoding.tolocal(k)] = v
387 self.branchcache[encoding.tolocal(k)] = v
388 return self.branchcache
388 return self.branchcache
389
389
390
390
391 def branchtags(self):
391 def branchtags(self):
392 '''return a dict where branch names map to the tipmost head of
392 '''return a dict where branch names map to the tipmost head of
393 the branch, open heads come before closed'''
393 the branch, open heads come before closed'''
394 bt = {}
394 bt = {}
395 for bn, heads in self.branchmap().iteritems():
395 for bn, heads in self.branchmap().iteritems():
396 head = None
396 head = None
397 for i in range(len(heads)-1, -1, -1):
397 for i in range(len(heads)-1, -1, -1):
398 h = heads[i]
398 h = heads[i]
399 if 'close' not in self.changelog.read(h)[5]:
399 if 'close' not in self.changelog.read(h)[5]:
400 head = h
400 head = h
401 break
401 break
402 # no open heads were found
402 # no open heads were found
403 if head is None:
403 if head is None:
404 head = heads[-1]
404 head = heads[-1]
405 bt[bn] = head
405 bt[bn] = head
406 return bt
406 return bt
407
407
408
408
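
A small sketch contrasting branchmap() (branch name mapped to all heads) with branchtags() (branch name mapped to a single tip-most open head); purely illustrative, using the short() helper imported at the top of this file.

    for branch, heads in repo.branchmap().iteritems():
        print branch, [short(h) for h in heads]   # every head on the branch

    for branch, head in repo.branchtags().iteritems():
        print branch, short(head)                 # one head per branch
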
409 def _readbranchcache(self):
409 def _readbranchcache(self):
410 partial = {}
410 partial = {}
411 try:
411 try:
412 f = self.opener("branchheads.cache")
412 f = self.opener("branchheads.cache")
413 lines = f.read().split('\n')
413 lines = f.read().split('\n')
414 f.close()
414 f.close()
415 except (IOError, OSError):
415 except (IOError, OSError):
416 return {}, nullid, nullrev
416 return {}, nullid, nullrev
417
417
418 try:
418 try:
419 last, lrev = lines.pop(0).split(" ", 1)
419 last, lrev = lines.pop(0).split(" ", 1)
420 last, lrev = bin(last), int(lrev)
420 last, lrev = bin(last), int(lrev)
421 if lrev >= len(self) or self[lrev].node() != last:
421 if lrev >= len(self) or self[lrev].node() != last:
422 # invalidate the cache
422 # invalidate the cache
423 raise ValueError('invalidating branch cache (tip differs)')
423 raise ValueError('invalidating branch cache (tip differs)')
424 for l in lines:
424 for l in lines:
425 if not l: continue
425 if not l: continue
426 node, label = l.split(" ", 1)
426 node, label = l.split(" ", 1)
427 partial.setdefault(label.strip(), []).append(bin(node))
427 partial.setdefault(label.strip(), []).append(bin(node))
428 except KeyboardInterrupt:
428 except KeyboardInterrupt:
429 raise
429 raise
430 except Exception, inst:
430 except Exception, inst:
431 if self.ui.debugflag:
431 if self.ui.debugflag:
432 self.ui.warn(str(inst), '\n')
432 self.ui.warn(str(inst), '\n')
433 partial, last, lrev = {}, nullid, nullrev
433 partial, last, lrev = {}, nullid, nullrev
434 return partial, last, lrev
434 return partial, last, lrev
435
435
436 def _writebranchcache(self, branches, tip, tiprev):
436 def _writebranchcache(self, branches, tip, tiprev):
437 try:
437 try:
438 f = self.opener("branchheads.cache", "w", atomictemp=True)
438 f = self.opener("branchheads.cache", "w", atomictemp=True)
439 f.write("%s %s\n" % (hex(tip), tiprev))
439 f.write("%s %s\n" % (hex(tip), tiprev))
440 for label, nodes in branches.iteritems():
440 for label, nodes in branches.iteritems():
441 for node in nodes:
441 for node in nodes:
442 f.write("%s %s\n" % (hex(node), label))
442 f.write("%s %s\n" % (hex(node), label))
443 f.rename()
443 f.rename()
444 except (IOError, OSError):
444 except (IOError, OSError):
445 pass
445 pass
446
446
447 def _updatebranchcache(self, partial, start, end):
447 def _updatebranchcache(self, partial, start, end):
448 for r in xrange(start, end):
448 for r in xrange(start, end):
449 c = self[r]
449 c = self[r]
450 b = c.branch()
450 b = c.branch()
451 bheads = partial.setdefault(b, [])
451 bheads = partial.setdefault(b, [])
452 bheads.append(c.node())
452 bheads.append(c.node())
453 for p in c.parents():
453 for p in c.parents():
454 pn = p.node()
454 pn = p.node()
455 if pn in bheads:
455 if pn in bheads:
456 bheads.remove(pn)
456 bheads.remove(pn)
457
457
458 def lookup(self, key):
458 def lookup(self, key):
459 if isinstance(key, int):
459 if isinstance(key, int):
460 return self.changelog.node(key)
460 return self.changelog.node(key)
461 elif key == '.':
461 elif key == '.':
462 return self.dirstate.parents()[0]
462 return self.dirstate.parents()[0]
463 elif key == 'null':
463 elif key == 'null':
464 return nullid
464 return nullid
465 elif key == 'tip':
465 elif key == 'tip':
466 return self.changelog.tip()
466 return self.changelog.tip()
467 n = self.changelog._match(key)
467 n = self.changelog._match(key)
468 if n:
468 if n:
469 return n
469 return n
470 if key in self.tags():
470 if key in self.tags():
471 return self.tags()[key]
471 return self.tags()[key]
472 if key in self.branchtags():
472 if key in self.branchtags():
473 return self.branchtags()[key]
473 return self.branchtags()[key]
474 n = self.changelog._partialmatch(key)
474 n = self.changelog._partialmatch(key)
475 if n:
475 if n:
476 return n
476 return n
477
477
478 # can't find key, check if it might have come from damaged dirstate
478 # can't find key, check if it might have come from damaged dirstate
479 if key in self.dirstate.parents():
479 if key in self.dirstate.parents():
480 raise error.Abort(_("working directory has unknown parent '%s'!")
480 raise error.Abort(_("working directory has unknown parent '%s'!")
481 % short(key))
481 % short(key))
482 try:
482 try:
483 if len(key) == 20:
483 if len(key) == 20:
484 key = hex(key)
484 key = hex(key)
485 except:
485 except:
486 pass
486 pass
487 raise error.RepoError(_("unknown revision '%s'") % key)
487 raise error.RepoError(_("unknown revision '%s'") % key)
488
488
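
lookup() accepts several key forms, as the branches above show; a quick summary sketch (the tag, branch and prefix values are placeholders):

    repo.lookup(0)          # integer revision number
    repo.lookup('.')        # first parent of the working directory
    repo.lookup('null')     # the null revision
    repo.lookup('tip')      # repository tip
    repo.lookup('v1.0')     # a tag name (placeholder), via self.tags()
    repo.lookup('default')  # a branch name, via self.branchtags()
    repo.lookup('4f2c7a')   # an unambiguous node-id prefix (placeholder)
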
489 def local(self):
489 def local(self):
490 return True
490 return True
491
491
492 def join(self, f):
492 def join(self, f):
493 return os.path.join(self.path, f)
493 return os.path.join(self.path, f)
494
494
495 def wjoin(self, f):
495 def wjoin(self, f):
496 return os.path.join(self.root, f)
496 return os.path.join(self.root, f)
497
497
498 def rjoin(self, f):
498 def rjoin(self, f):
499 return os.path.join(self.root, util.pconvert(f))
499 return os.path.join(self.root, util.pconvert(f))
500
500
501 def file(self, f):
501 def file(self, f):
502 if f[0] == '/':
502 if f[0] == '/':
503 f = f[1:]
503 f = f[1:]
504 return filelog.filelog(self.sopener, f)
504 return filelog.filelog(self.sopener, f)
505
505
506 def changectx(self, changeid):
506 def changectx(self, changeid):
507 return self[changeid]
507 return self[changeid]
508
508
509 def parents(self, changeid=None):
509 def parents(self, changeid=None):
510 '''get list of changectxs for parents of changeid'''
510 '''get list of changectxs for parents of changeid'''
511 return self[changeid].parents()
511 return self[changeid].parents()
512
512
513 def filectx(self, path, changeid=None, fileid=None):
513 def filectx(self, path, changeid=None, fileid=None):
514 """changeid can be a changeset revision, node, or tag.
514 """changeid can be a changeset revision, node, or tag.
515 fileid can be a file revision or node."""
515 fileid can be a file revision or node."""
516 return context.filectx(self, path, changeid, fileid)
516 return context.filectx(self, path, changeid, fileid)
517
517
518 def getcwd(self):
518 def getcwd(self):
519 return self.dirstate.getcwd()
519 return self.dirstate.getcwd()
520
520
521 def pathto(self, f, cwd=None):
521 def pathto(self, f, cwd=None):
522 return self.dirstate.pathto(f, cwd)
522 return self.dirstate.pathto(f, cwd)
523
523
524 def wfile(self, f, mode='r'):
524 def wfile(self, f, mode='r'):
525 return self.wopener(f, mode)
525 return self.wopener(f, mode)
526
526
527 def _link(self, f):
527 def _link(self, f):
528 return os.path.islink(self.wjoin(f))
528 return os.path.islink(self.wjoin(f))
529
529
530 def _filter(self, filter, filename, data):
530 def _filter(self, filter, filename, data):
531 if filter not in self.filterpats:
531 if filter not in self.filterpats:
532 l = []
532 l = []
533 for pat, cmd in self.ui.configitems(filter):
533 for pat, cmd in self.ui.configitems(filter):
534 if cmd == '!':
534 if cmd == '!':
535 continue
535 continue
536 mf = match_.match(self.root, '', [pat])
536 mf = match_.match(self.root, '', [pat])
537 fn = None
537 fn = None
538 params = cmd
538 params = cmd
539 for name, filterfn in self._datafilters.iteritems():
539 for name, filterfn in self._datafilters.iteritems():
540 if cmd.startswith(name):
540 if cmd.startswith(name):
541 fn = filterfn
541 fn = filterfn
542 params = cmd[len(name):].lstrip()
542 params = cmd[len(name):].lstrip()
543 break
543 break
544 if not fn:
544 if not fn:
545 fn = lambda s, c, **kwargs: util.filter(s, c)
545 fn = lambda s, c, **kwargs: util.filter(s, c)
546 # Wrap old filters not supporting keyword arguments
546 # Wrap old filters not supporting keyword arguments
547 if not inspect.getargspec(fn)[2]:
547 if not inspect.getargspec(fn)[2]:
548 oldfn = fn
548 oldfn = fn
549 fn = lambda s, c, **kwargs: oldfn(s, c)
549 fn = lambda s, c, **kwargs: oldfn(s, c)
550 l.append((mf, fn, params))
550 l.append((mf, fn, params))
551 self.filterpats[filter] = l
551 self.filterpats[filter] = l
552
552
553 for mf, fn, cmd in self.filterpats[filter]:
553 for mf, fn, cmd in self.filterpats[filter]:
554 if mf(filename):
554 if mf(filename):
555 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
555 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
556 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
556 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
557 break
557 break
558
558
559 return data
559 return data
560
560
561 def adddatafilter(self, name, filter):
561 def adddatafilter(self, name, filter):
562 self._datafilters[name] = filter
562 self._datafilters[name] = filter
563
563
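
A hedged sketch of adddatafilter(): the registered name is matched as a prefix of the [encode]/[decode] command string, and _filter() then calls the callable with keyword arguments instead of spawning an external pipe. The 'upper:' name and the hgrc pattern are hypothetical.

    def upper_filter(data, cmd, ui=None, repo=None, filename=None, **kwargs):
        # cmd is the full command string, e.g. 'upper: some-argument'
        return data.upper()

    repo.adddatafilter('upper:', upper_filter)
    # with a matching hgrc entry such as
    #   [encode]
    #   **.txt = upper:
    # wread() will now pass file data through upper_filter
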
564 def wread(self, filename):
564 def wread(self, filename):
565 if self._link(filename):
565 if self._link(filename):
566 data = os.readlink(self.wjoin(filename))
566 data = os.readlink(self.wjoin(filename))
567 else:
567 else:
568 data = self.wopener(filename, 'r').read()
568 data = self.wopener(filename, 'r').read()
569 return self._filter("encode", filename, data)
569 return self._filter("encode", filename, data)
570
570
571 def wwrite(self, filename, data, flags):
571 def wwrite(self, filename, data, flags):
572 data = self._filter("decode", filename, data)
572 data = self._filter("decode", filename, data)
573 try:
573 try:
574 os.unlink(self.wjoin(filename))
574 os.unlink(self.wjoin(filename))
575 except OSError:
575 except OSError:
576 pass
576 pass
577 if 'l' in flags:
577 if 'l' in flags:
578 self.wopener.symlink(data, filename)
578 self.wopener.symlink(data, filename)
579 else:
579 else:
580 self.wopener(filename, 'w').write(data)
580 self.wopener(filename, 'w').write(data)
581 if 'x' in flags:
581 if 'x' in flags:
582 util.set_flags(self.wjoin(filename), False, True)
582 util.set_flags(self.wjoin(filename), False, True)
583
583
584 def wwritedata(self, filename, data):
584 def wwritedata(self, filename, data):
585 return self._filter("decode", filename, data)
585 return self._filter("decode", filename, data)
586
586
587 def transaction(self):
587 def transaction(self):
588 tr = self._transref and self._transref() or None
588 tr = self._transref and self._transref() or None
589 if tr and tr.running():
589 if tr and tr.running():
590 return tr.nest()
590 return tr.nest()
591
591
592 # abort here if the journal already exists
592 # abort here if the journal already exists
593 if os.path.exists(self.sjoin("journal")):
593 if os.path.exists(self.sjoin("journal")):
594 raise error.RepoError(_("journal already exists - run hg recover"))
594 raise error.RepoError(_("journal already exists - run hg recover"))
595
595
596 # save dirstate for rollback
596 # save dirstate for rollback
597 try:
597 try:
598 ds = self.opener("dirstate").read()
598 ds = self.opener("dirstate").read()
599 except IOError:
599 except IOError:
600 ds = ""
600 ds = ""
601 self.opener("journal.dirstate", "w").write(ds)
601 self.opener("journal.dirstate", "w").write(ds)
602 self.opener("journal.branch", "w").write(self.dirstate.branch())
602 self.opener("journal.branch", "w").write(self.dirstate.branch())
603
603
604 renames = [(self.sjoin("journal"), self.sjoin("undo")),
604 renames = [(self.sjoin("journal"), self.sjoin("undo")),
605 (self.join("journal.dirstate"), self.join("undo.dirstate")),
605 (self.join("journal.dirstate"), self.join("undo.dirstate")),
606 (self.join("journal.branch"), self.join("undo.branch"))]
606 (self.join("journal.branch"), self.join("undo.branch"))]
607 tr = transaction.transaction(self.ui.warn, self.sopener,
607 tr = transaction.transaction(self.ui.warn, self.sopener,
608 self.sjoin("journal"),
608 self.sjoin("journal"),
609 aftertrans(renames),
609 aftertrans(renames),
610 self.store.createmode)
610 self.store.createmode)
611 self._transref = weakref.ref(tr)
611 self._transref = weakref.ref(tr)
612 return tr
612 return tr
613
613
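
A minimal sketch of the transaction protocol, assuming the store lock is already held; it mirrors the pattern commitctx() uses further down: close() on success, then drop the last reference so an unclosed transaction is aborted from its journal.

    lock = repo.lock()
    try:
        tr = repo.transaction()
        try:
            # ... write to revlogs through tr ...
            tr.close()      # success: aftertrans() renames journal to undo
        finally:
            del tr          # an unclosed transaction aborts via its journal
    finally:
        lock.release()
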
614 def recover(self):
614 def recover(self):
615 lock = self.lock()
615 lock = self.lock()
616 try:
616 try:
617 if os.path.exists(self.sjoin("journal")):
617 if os.path.exists(self.sjoin("journal")):
618 self.ui.status(_("rolling back interrupted transaction\n"))
618 self.ui.status(_("rolling back interrupted transaction\n"))
619 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
619 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
620 self.invalidate()
620 self.invalidate()
621 return True
621 return True
622 else:
622 else:
623 self.ui.warn(_("no interrupted transaction available\n"))
623 self.ui.warn(_("no interrupted transaction available\n"))
624 return False
624 return False
625 finally:
625 finally:
626 lock.release()
626 lock.release()
627
627
628 def rollback(self):
628 def rollback(self):
629 wlock = lock = None
629 wlock = lock = None
630 try:
630 try:
631 wlock = self.wlock()
631 wlock = self.wlock()
632 lock = self.lock()
632 lock = self.lock()
633 if os.path.exists(self.sjoin("undo")):
633 if os.path.exists(self.sjoin("undo")):
634 self.ui.status(_("rolling back last transaction\n"))
634 self.ui.status(_("rolling back last transaction\n"))
635 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
635 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
636 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
636 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
637 try:
637 try:
638 branch = self.opener("undo.branch").read()
638 branch = self.opener("undo.branch").read()
639 self.dirstate.setbranch(branch)
639 self.dirstate.setbranch(branch)
640 except IOError:
640 except IOError:
641 self.ui.warn(_("Named branch could not be reset, "
641 self.ui.warn(_("Named branch could not be reset, "
642 "current branch still is: %s\n")
642 "current branch still is: %s\n")
643 % encoding.tolocal(self.dirstate.branch()))
643 % encoding.tolocal(self.dirstate.branch()))
644 self.invalidate()
644 self.invalidate()
645 self.dirstate.invalidate()
645 self.dirstate.invalidate()
646 else:
646 else:
647 self.ui.warn(_("no rollback information available\n"))
647 self.ui.warn(_("no rollback information available\n"))
648 finally:
648 finally:
649 release(lock, wlock)
649 release(lock, wlock)
650
650
651 def invalidate(self):
651 def invalidate(self):
652 for a in "changelog manifest".split():
652 for a in "changelog manifest".split():
653 if a in self.__dict__:
653 if a in self.__dict__:
654 delattr(self, a)
654 delattr(self, a)
655 self.tagscache = None
655 self.tagscache = None
656 self._tagstypecache = None
656 self._tagstypecache = None
657 self.nodetagscache = None
657 self.nodetagscache = None
658 self.branchcache = None
658 self.branchcache = None
659 self._ubranchcache = None
659 self._ubranchcache = None
660 self._branchcachetip = None
660 self._branchcachetip = None
661
661
662 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
662 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
663 try:
663 try:
664 l = lock.lock(lockname, 0, releasefn, desc=desc)
664 l = lock.lock(lockname, 0, releasefn, desc=desc)
665 except error.LockHeld, inst:
665 except error.LockHeld, inst:
666 if not wait:
666 if not wait:
667 raise
667 raise
668 self.ui.warn(_("waiting for lock on %s held by %r\n") %
668 self.ui.warn(_("waiting for lock on %s held by %r\n") %
669 (desc, inst.locker))
669 (desc, inst.locker))
670 # default to 600 seconds timeout
670 # default to 600 seconds timeout
671 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
671 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
672 releasefn, desc=desc)
672 releasefn, desc=desc)
673 if acquirefn:
673 if acquirefn:
674 acquirefn()
674 acquirefn()
675 return l
675 return l
676
676
677 def lock(self, wait=True):
677 def lock(self, wait=True):
678 l = self._lockref and self._lockref()
678 l = self._lockref and self._lockref()
679 if l is not None and l.held:
679 if l is not None and l.held:
680 l.lock()
680 l.lock()
681 return l
681 return l
682
682
683 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
683 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
684 _('repository %s') % self.origroot)
684 _('repository %s') % self.origroot)
685 self._lockref = weakref.ref(l)
685 self._lockref = weakref.ref(l)
686 return l
686 return l
687
687
688 def wlock(self, wait=True):
688 def wlock(self, wait=True):
689 l = self._wlockref and self._wlockref()
689 l = self._wlockref and self._wlockref()
690 if l is not None and l.held:
690 if l is not None and l.held:
691 l.lock()
691 l.lock()
692 return l
692 return l
693
693
694 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
694 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
695 self.dirstate.invalidate, _('working directory of %s') %
695 self.dirstate.invalidate, _('working directory of %s') %
696 self.origroot)
696 self.origroot)
697 self._wlockref = weakref.ref(l)
697 self._wlockref = weakref.ref(l)
698 return l
698 return l
699
699
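
A small sketch of the locking discipline used throughout this class: acquire wlock() before lock(), and release both in reverse order with release() from mercurial.lock (imported at the top of this file), as rollback() above does.

    wlock = lock = None
    try:
        wlock = repo.wlock()   # protects the working directory and dirstate
        lock = repo.lock()     # protects the store
        # ... modify the repository ...
    finally:
        release(lock, wlock)
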
700 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
700 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
701 """
701 """
702 commit an individual file as part of a larger transaction
702 commit an individual file as part of a larger transaction
703 """
703 """
704
704
705 fname = fctx.path()
705 fname = fctx.path()
706 text = fctx.data()
706 text = fctx.data()
707 flog = self.file(fname)
707 flog = self.file(fname)
708 fparent1 = manifest1.get(fname, nullid)
708 fparent1 = manifest1.get(fname, nullid)
709 fparent2 = fparent2o = manifest2.get(fname, nullid)
709 fparent2 = fparent2o = manifest2.get(fname, nullid)
710
710
711 meta = {}
711 meta = {}
712 copy = fctx.renamed()
712 copy = fctx.renamed()
713 if copy and copy[0] != fname:
713 if copy and copy[0] != fname:
714 # Mark the new revision of this file as a copy of another
714 # Mark the new revision of this file as a copy of another
715 # file. This copy data will effectively act as a parent
715 # file. This copy data will effectively act as a parent
716 # of this new revision. If this is a merge, the first
716 # of this new revision. If this is a merge, the first
717 # parent will be the nullid (meaning "look up the copy data")
717 # parent will be the nullid (meaning "look up the copy data")
718 # and the second one will be the other parent. For example:
718 # and the second one will be the other parent. For example:
719 #
719 #
720 # 0 --- 1 --- 3 rev1 changes file foo
720 # 0 --- 1 --- 3 rev1 changes file foo
721 # \ / rev2 renames foo to bar and changes it
721 # \ / rev2 renames foo to bar and changes it
722 # \- 2 -/ rev3 should have bar with all changes and
722 # \- 2 -/ rev3 should have bar with all changes and
723 # should record that bar descends from
723 # should record that bar descends from
724 # bar in rev2 and foo in rev1
724 # bar in rev2 and foo in rev1
725 #
725 #
726 # this allows this merge to succeed:
726 # this allows this merge to succeed:
727 #
727 #
728 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
728 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
729 # \ / merging rev3 and rev4 should use bar@rev2
729 # \ / merging rev3 and rev4 should use bar@rev2
730 # \- 2 --- 4 as the merge base
730 # \- 2 --- 4 as the merge base
731 #
731 #
732
732
733 cfname = copy[0]
733 cfname = copy[0]
734 crev = manifest1.get(cfname)
734 crev = manifest1.get(cfname)
735 newfparent = fparent2
735 newfparent = fparent2
736
736
737 if manifest2: # branch merge
737 if manifest2: # branch merge
738 if fparent2 == nullid or crev is None: # copied on remote side
738 if fparent2 == nullid or crev is None: # copied on remote side
739 if cfname in manifest2:
739 if cfname in manifest2:
740 crev = manifest2[cfname]
740 crev = manifest2[cfname]
741 newfparent = fparent1
741 newfparent = fparent1
742
742
743 # find source in nearest ancestor if we've lost track
743 # find source in nearest ancestor if we've lost track
744 if not crev:
744 if not crev:
745 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
745 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
746 (fname, cfname))
746 (fname, cfname))
747 for ancestor in self['.'].ancestors():
747 for ancestor in self['.'].ancestors():
748 if cfname in ancestor:
748 if cfname in ancestor:
749 crev = ancestor[cfname].filenode()
749 crev = ancestor[cfname].filenode()
750 break
750 break
751
751
752 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
752 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
753 meta["copy"] = cfname
753 meta["copy"] = cfname
754 meta["copyrev"] = hex(crev)
754 meta["copyrev"] = hex(crev)
755 fparent1, fparent2 = nullid, newfparent
755 fparent1, fparent2 = nullid, newfparent
756 elif fparent2 != nullid:
756 elif fparent2 != nullid:
757 # is one parent an ancestor of the other?
757 # is one parent an ancestor of the other?
758 fparentancestor = flog.ancestor(fparent1, fparent2)
758 fparentancestor = flog.ancestor(fparent1, fparent2)
759 if fparentancestor == fparent1:
759 if fparentancestor == fparent1:
760 fparent1, fparent2 = fparent2, nullid
760 fparent1, fparent2 = fparent2, nullid
761 elif fparentancestor == fparent2:
761 elif fparentancestor == fparent2:
762 fparent2 = nullid
762 fparent2 = nullid
763
763
764 # is the file changed?
764 # is the file changed?
765 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
765 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
766 changelist.append(fname)
766 changelist.append(fname)
767 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
767 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
768
768
769 # are just the flags changed during merge?
769 # are just the flags changed during merge?
770 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
770 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
771 changelist.append(fname)
771 changelist.append(fname)
772
772
773 return fparent1
773 return fparent1
774
774
775 def commit(self, files=None, text="", user=None, date=None, match=None,
775 def commit(self, files=None, text="", user=None, date=None, match=None,
776 force=False, editor=False, extra={}):
776 force=False, editor=False, extra={}):
777 """Add a new revision to current repository.
777 """Add a new revision to current repository.
778
778
779 Revision information is gathered from the working directory, files and
779 Revision information is gathered from the working directory, files and
780 match can be used to filter the committed files.
780 match can be used to filter the committed files.
781 If editor is supplied, it is called to get a commit message.
781 If editor is supplied, it is called to get a commit message.
782 """
782 """
783 wlock = self.wlock()
783 wlock = self.wlock()
784 try:
784 try:
785 p1, p2 = self.dirstate.parents()
785 p1, p2 = self.dirstate.parents()
786
786
787 if (not force and p2 != nullid and match and
787 if (not force and p2 != nullid and match and
788 (match.files() or match.anypats())):
788 (match.files() or match.anypats())):
789 raise util.Abort(_('cannot partially commit a merge '
789 raise util.Abort(_('cannot partially commit a merge '
790 '(do not specify files or patterns)'))
790 '(do not specify files or patterns)'))
791
791
792 if files:
792 if files:
793 modified, removed = [], []
793 modified, removed = [], []
794 for f in sorted(set(files)):
794 for f in sorted(set(files)):
795 s = self.dirstate[f]
795 s = self.dirstate[f]
796 if s in 'nma':
796 if s in 'nma':
797 modified.append(f)
797 modified.append(f)
798 elif s == 'r':
798 elif s == 'r':
799 removed.append(f)
799 removed.append(f)
800 else:
800 else:
801 self.ui.warn(_("%s not tracked!\n") % f)
801 self.ui.warn(_("%s not tracked!\n") % f)
802 changes = [modified, [], removed, [], []]
802 changes = [modified, [], removed, [], []]
803 else:
803 else:
804 changes = self.status(match=match)
804 changes = self.status(match=match)
805
805
806 if (not force and not extra.get("close") and p2 == nullid
806 if (not force and not extra.get("close") and p2 == nullid
807 and not (changes[0] or changes[1] or changes[2])
807 and not (changes[0] or changes[1] or changes[2])
808 and self[None].branch() == self['.'].branch()):
808 and self[None].branch() == self['.'].branch()):
809 self.ui.status(_("nothing changed\n"))
809 self.ui.status(_("nothing changed\n"))
810 return None
810 return None
811
811
812 ms = merge_.mergestate(self)
812 ms = merge_.mergestate(self)
813 for f in changes[0]:
813 for f in changes[0]:
814 if f in ms and ms[f] == 'u':
814 if f in ms and ms[f] == 'u':
815 raise util.Abort(_("unresolved merge conflicts "
815 raise util.Abort(_("unresolved merge conflicts "
816 "(see hg resolve)"))
816 "(see hg resolve)"))
817
817
818 wctx = context.workingctx(self, (p1, p2), text, user, date,
818 wctx = context.workingctx(self, (p1, p2), text, user, date,
819 extra, changes)
819 extra, changes)
820 if editor:
820 if editor:
821 wctx._text = editor(self, wctx,
821 wctx._text = editor(self, wctx,
822 changes[1], changes[0], changes[2])
822 changes[1], changes[0], changes[2])
823 ret = self.commitctx(wctx, True)
823 ret = self.commitctx(wctx, True)
824
824
825 # update dirstate and mergestate
825 # update dirstate and mergestate
826 for f in changes[0] + changes[1]:
826 for f in changes[0] + changes[1]:
827 self.dirstate.normal(f)
827 self.dirstate.normal(f)
828 for f in changes[2]:
828 for f in changes[2]:
829 self.dirstate.forget(f)
829 self.dirstate.forget(f)
830 self.dirstate.setparents(ret)
830 self.dirstate.setparents(ret)
831 ms.reset()
831 ms.reset()
832
832
833 return ret
833 return ret
834
834
835 finally:
835 finally:
836 wlock.release()
836 wlock.release()
837
837
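
A hedged usage sketch for commit(); the file list, message and user are placeholders, and with files=None and match=None everything reported by status() is committed.

    node = repo.commit(files=['a.txt'],                   # placeholder file list
                       text='example commit message',
                       user='example user <user@example.com>',
                       date=None)                         # None means "now"
    if node is None:
        print 'nothing changed'
    else:
        print 'committed', short(node)
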
838 def commitctx(self, ctx, error=False):
838 def commitctx(self, ctx, error=False):
839 """Add a new revision to current repository.
839 """Add a new revision to current repository.
840
840
841 Revision information is passed via the context argument.
841 Revision information is passed via the context argument.
842 """
842 """
843
843
844 tr = lock = None
844 tr = lock = None
845 removed = ctx.removed()
845 removed = ctx.removed()
846 p1, p2 = ctx.p1(), ctx.p2()
846 p1, p2 = ctx.p1(), ctx.p2()
847 m1 = p1.manifest().copy()
847 m1 = p1.manifest().copy()
848 m2 = p2.manifest()
848 m2 = p2.manifest()
849 user = ctx.user()
849 user = ctx.user()
850
850
851 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
851 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
852 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
852 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
853
853
854 lock = self.lock()
854 lock = self.lock()
855 try:
855 try:
856 tr = self.transaction()
856 tr = self.transaction()
857 trp = weakref.proxy(tr)
857 trp = weakref.proxy(tr)
858
858
859 # check in files
859 # check in files
860 new = {}
860 new = {}
861 changed = []
861 changed = []
862 linkrev = len(self)
862 linkrev = len(self)
863 for f in sorted(ctx.modified() + ctx.added()):
863 for f in sorted(ctx.modified() + ctx.added()):
864 self.ui.note(f + "\n")
864 self.ui.note(f + "\n")
865 try:
865 try:
866 fctx = ctx[f]
866 fctx = ctx[f]
867 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
867 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
868 changed)
868 changed)
869 m1.set(f, fctx.flags())
869 m1.set(f, fctx.flags())
870 except (OSError, IOError):
870 except (OSError, IOError):
871 if error:
871 if error:
872 self.ui.warn(_("trouble committing %s!\n") % f)
872 self.ui.warn(_("trouble committing %s!\n") % f)
873 raise
873 raise
874 else:
874 else:
875 removed.append(f)
875 removed.append(f)
876
876
877 # update manifest
877 # update manifest
878 m1.update(new)
878 m1.update(new)
879 removed = [f for f in sorted(removed) if f in m1 or f in m2]
879 removed = [f for f in sorted(removed) if f in m1 or f in m2]
880 drop = [f for f in removed if f in m1]
880 drop = [f for f in removed if f in m1]
881 for f in drop:
881 for f in drop:
882 del m1[f]
882 del m1[f]
883 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
883 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
884 p2.manifestnode(), (new, drop))
884 p2.manifestnode(), (new, drop))
885
885
886 # update changelog
886 # update changelog
887 self.changelog.delayupdate()
887 self.changelog.delayupdate()
888 n = self.changelog.add(mn, changed + removed, ctx.description(),
888 n = self.changelog.add(mn, changed + removed, ctx.description(),
889 trp, p1.node(), p2.node(),
889 trp, p1.node(), p2.node(),
890 user, ctx.date(), ctx.extra().copy())
890 user, ctx.date(), ctx.extra().copy())
891 p = lambda: self.changelog.writepending() and self.root or ""
891 p = lambda: self.changelog.writepending() and self.root or ""
892 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
892 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
893 parent2=xp2, pending=p)
893 parent2=xp2, pending=p)
894 self.changelog.finalize(trp)
894 self.changelog.finalize(trp)
895 tr.close()
895 tr.close()
896
896
897 if self.branchcache:
897 if self.branchcache:
898 self.branchtags()
898 self.branchtags()
899
899
900 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
900 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
901 return n
901 return n
902 finally:
902 finally:
903 del tr
903 del tr
904 lock.release()
904 lock.release()
905
905
906 def walk(self, match, node=None):
906 def walk(self, match, node=None):
907 '''
907 '''
908 walk recursively through the directory tree or a given
908 walk recursively through the directory tree or a given
909 changeset, finding all files matched by the match
909 changeset, finding all files matched by the match
910 function
910 function
911 '''
911 '''
912 return self[node].walk(match)
912 return self[node].walk(match)
913
913
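
An illustrative sketch of walk() with a match object; match_.always() is the same match-everything helper that status() below falls back to when no match is given.

    m = match_.always(repo.root, repo.getcwd())
    for f in repo.walk(m):           # walk the working directory
        print f
    for f in repo.walk(m, 'tip'):    # walk a given changeset (here: tip)
        print f
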
914 def status(self, node1='.', node2=None, match=None,
914 def status(self, node1='.', node2=None, match=None,
915 ignored=False, clean=False, unknown=False):
915 ignored=False, clean=False, unknown=False):
916 """return status of files between two nodes or node and working directory
916 """return status of files between two nodes or node and working directory
917
917
918 If node1 is None, use the first dirstate parent instead.
918 If node1 is None, use the first dirstate parent instead.
919 If node2 is None, compare node1 with working directory.
919 If node2 is None, compare node1 with working directory.
920 """
920 """
921
921
922 def mfmatches(ctx):
922 def mfmatches(ctx):
923 mf = ctx.manifest().copy()
923 mf = ctx.manifest().copy()
924 for fn in mf.keys():
924 for fn in mf.keys():
925 if not match(fn):
925 if not match(fn):
926 del mf[fn]
926 del mf[fn]
927 return mf
927 return mf
928
928
929 if isinstance(node1, context.changectx):
929 if isinstance(node1, context.changectx):
930 ctx1 = node1
930 ctx1 = node1
931 else:
931 else:
932 ctx1 = self[node1]
932 ctx1 = self[node1]
933 if isinstance(node2, context.changectx):
933 if isinstance(node2, context.changectx):
934 ctx2 = node2
934 ctx2 = node2
935 else:
935 else:
936 ctx2 = self[node2]
936 ctx2 = self[node2]
937
937
938 working = ctx2.rev() is None
938 working = ctx2.rev() is None
939 parentworking = working and ctx1 == self['.']
939 parentworking = working and ctx1 == self['.']
940 match = match or match_.always(self.root, self.getcwd())
940 match = match or match_.always(self.root, self.getcwd())
941 listignored, listclean, listunknown = ignored, clean, unknown
941 listignored, listclean, listunknown = ignored, clean, unknown
942
942
943 # load earliest manifest first for caching reasons
943 # load earliest manifest first for caching reasons
944 if not working and ctx2.rev() < ctx1.rev():
944 if not working and ctx2.rev() < ctx1.rev():
945 ctx2.manifest()
945 ctx2.manifest()
946
946
947 if not parentworking:
947 if not parentworking:
948 def bad(f, msg):
948 def bad(f, msg):
949 if f not in ctx1:
949 if f not in ctx1:
950 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
950 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
951 return False
951 return False
952 match.bad = bad
952 match.bad = bad
953
953
954 if working: # we need to scan the working dir
954 if working: # we need to scan the working dir
955 s = self.dirstate.status(match, listignored, listclean, listunknown)
955 s = self.dirstate.status(match, listignored, listclean, listunknown)
956 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
956 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
957
957
958 # check for any possibly clean files
958 # check for any possibly clean files
959 if parentworking and cmp:
959 if parentworking and cmp:
960 fixup = []
960 fixup = []
961 # do a full compare of any files that might have changed
961 # do a full compare of any files that might have changed
962 for f in sorted(cmp):
962 for f in sorted(cmp):
963 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
963 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
964 or ctx1[f].cmp(ctx2[f].data())):
964 or ctx1[f].cmp(ctx2[f].data())):
965 modified.append(f)
965 modified.append(f)
966 else:
966 else:
967 fixup.append(f)
967 fixup.append(f)
968
968
969 if listclean:
969 if listclean:
970 clean += fixup
970 clean += fixup
971
971
972 # update dirstate for files that are actually clean
972 # update dirstate for files that are actually clean
973 if fixup:
973 if fixup:
974 try:
974 try:
975 # updating the dirstate is optional
975 # updating the dirstate is optional
976 # so we don't wait on the lock
976 # so we don't wait on the lock
977 wlock = self.wlock(False)
977 wlock = self.wlock(False)
978 try:
978 try:
979 for f in fixup:
979 for f in fixup:
980 self.dirstate.normal(f)
980 self.dirstate.normal(f)
981 finally:
981 finally:
982 wlock.release()
982 wlock.release()
983 except error.LockError:
983 except error.LockError:
984 pass
984 pass
985
985
986 if not parentworking:
986 if not parentworking:
987 mf1 = mfmatches(ctx1)
987 mf1 = mfmatches(ctx1)
988 if working:
988 if working:
989 # we are comparing working dir against non-parent
989 # we are comparing working dir against non-parent
990 # generate a pseudo-manifest for the working dir
990 # generate a pseudo-manifest for the working dir
991 mf2 = mfmatches(self['.'])
991 mf2 = mfmatches(self['.'])
992 for f in cmp + modified + added:
992 for f in cmp + modified + added:
993 mf2[f] = None
993 mf2[f] = None
994 mf2.set(f, ctx2.flags(f))
994 mf2.set(f, ctx2.flags(f))
995 for f in removed:
995 for f in removed:
996 if f in mf2:
996 if f in mf2:
997 del mf2[f]
997 del mf2[f]
998 else:
998 else:
999 # we are comparing two revisions
999 # we are comparing two revisions
1000 deleted, unknown, ignored = [], [], []
1000 deleted, unknown, ignored = [], [], []
1001 mf2 = mfmatches(ctx2)
1001 mf2 = mfmatches(ctx2)
1002
1002
1003 modified, added, clean = [], [], []
1003 modified, added, clean = [], [], []
1004 for fn in mf2:
1004 for fn in mf2:
1005 if fn in mf1:
1005 if fn in mf1:
1006 if (mf1.flags(fn) != mf2.flags(fn) or
1006 if (mf1.flags(fn) != mf2.flags(fn) or
1007 (mf1[fn] != mf2[fn] and
1007 (mf1[fn] != mf2[fn] and
1008 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1008 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1009 modified.append(fn)
1009 modified.append(fn)
1010 elif listclean:
1010 elif listclean:
1011 clean.append(fn)
1011 clean.append(fn)
1012 del mf1[fn]
1012 del mf1[fn]
1013 else:
1013 else:
1014 added.append(fn)
1014 added.append(fn)
1015 removed = mf1.keys()
1015 removed = mf1.keys()
1016
1016
1017 r = modified, added, removed, deleted, unknown, ignored, clean
1017 r = modified, added, removed, deleted, unknown, ignored, clean
1018 [l.sort() for l in r]
1018 [l.sort() for l in r]
1019 return r
1019 return r
1020
1020
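# Editorial usage sketch, not part of the original file (path hypothetical):
# status() returns seven lists, unpacked here in the order assembled above.
#
#     from mercurial import ui, hg
#     repo = hg.repository(ui.ui(), '/path/to/repo')
#     modified, added, removed, deleted, unknown, ignored, clean = \
#         repo.status(ignored=True, clean=True, unknown=True)
#     for f in modified:
#         print 'M', f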
1021 def add(self, list):
1021 def add(self, list):
1022 wlock = self.wlock()
1022 wlock = self.wlock()
1023 try:
1023 try:
1024 rejected = []
1024 rejected = []
1025 for f in list:
1025 for f in list:
1026 p = self.wjoin(f)
1026 p = self.wjoin(f)
1027 try:
1027 try:
1028 st = os.lstat(p)
1028 st = os.lstat(p)
1029 except:
1029 except:
1030 self.ui.warn(_("%s does not exist!\n") % f)
1030 self.ui.warn(_("%s does not exist!\n") % f)
1031 rejected.append(f)
1031 rejected.append(f)
1032 continue
1032 continue
1033 if st.st_size > 10000000:
1033 if st.st_size > 10000000:
1034 self.ui.warn(_("%s: files over 10MB may cause memory and"
1034 self.ui.warn(_("%s: files over 10MB may cause memory and"
1035 " performance problems\n"
1035 " performance problems\n"
1036 "(use 'hg revert %s' to unadd the file)\n")
1036 "(use 'hg revert %s' to unadd the file)\n")
1037 % (f, f))
1037 % (f, f))
1038 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1038 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1039 self.ui.warn(_("%s not added: only files and symlinks "
1039 self.ui.warn(_("%s not added: only files and symlinks "
1040 "supported currently\n") % f)
1040 "supported currently\n") % f)
1041 rejected.append(p)
1041 rejected.append(p)
1042 elif self.dirstate[f] in 'amn':
1042 elif self.dirstate[f] in 'amn':
1043 self.ui.warn(_("%s already tracked!\n") % f)
1043 self.ui.warn(_("%s already tracked!\n") % f)
1044 elif self.dirstate[f] == 'r':
1044 elif self.dirstate[f] == 'r':
1045 self.dirstate.normallookup(f)
1045 self.dirstate.normallookup(f)
1046 else:
1046 else:
1047 self.dirstate.add(f)
1047 self.dirstate.add(f)
1048 return rejected
1048 return rejected
1049 finally:
1049 finally:
1050 wlock.release()
1050 wlock.release()
1051
1051
1052 def forget(self, list):
1052 def forget(self, list):
1053 wlock = self.wlock()
1053 wlock = self.wlock()
1054 try:
1054 try:
1055 for f in list:
1055 for f in list:
1056 if self.dirstate[f] != 'a':
1056 if self.dirstate[f] != 'a':
1057 self.ui.warn(_("%s not added!\n") % f)
1057 self.ui.warn(_("%s not added!\n") % f)
1058 else:
1058 else:
1059 self.dirstate.forget(f)
1059 self.dirstate.forget(f)
1060 finally:
1060 finally:
1061 wlock.release()
1061 wlock.release()
1062
1062
1063 def remove(self, list, unlink=False):
1063 def remove(self, list, unlink=False):
1064 if unlink:
1064 if unlink:
1065 for f in list:
1065 for f in list:
1066 try:
1066 try:
1067 util.unlink(self.wjoin(f))
1067 util.unlink(self.wjoin(f))
1068 except OSError, inst:
1068 except OSError, inst:
1069 if inst.errno != errno.ENOENT:
1069 if inst.errno != errno.ENOENT:
1070 raise
1070 raise
1071 wlock = self.wlock()
1071 wlock = self.wlock()
1072 try:
1072 try:
1073 for f in list:
1073 for f in list:
1074 if unlink and os.path.exists(self.wjoin(f)):
1074 if unlink and os.path.exists(self.wjoin(f)):
1075 self.ui.warn(_("%s still exists!\n") % f)
1075 self.ui.warn(_("%s still exists!\n") % f)
1076 elif self.dirstate[f] == 'a':
1076 elif self.dirstate[f] == 'a':
1077 self.dirstate.forget(f)
1077 self.dirstate.forget(f)
1078 elif f not in self.dirstate:
1078 elif f not in self.dirstate:
1079 self.ui.warn(_("%s not tracked!\n") % f)
1079 self.ui.warn(_("%s not tracked!\n") % f)
1080 else:
1080 else:
1081 self.dirstate.remove(f)
1081 self.dirstate.remove(f)
1082 finally:
1082 finally:
1083 wlock.release()
1083 wlock.release()
1084
1084
1085 def undelete(self, list):
1085 def undelete(self, list):
1086 manifests = [self.manifest.read(self.changelog.read(p)[0])
1086 manifests = [self.manifest.read(self.changelog.read(p)[0])
1087 for p in self.dirstate.parents() if p != nullid]
1087 for p in self.dirstate.parents() if p != nullid]
1088 wlock = self.wlock()
1088 wlock = self.wlock()
1089 try:
1089 try:
1090 for f in list:
1090 for f in list:
1091 if self.dirstate[f] != 'r':
1091 if self.dirstate[f] != 'r':
1092 self.ui.warn(_("%s not removed!\n") % f)
1092 self.ui.warn(_("%s not removed!\n") % f)
1093 else:
1093 else:
1094 m = f in manifests[0] and manifests[0] or manifests[1]
1094 m = f in manifests[0] and manifests[0] or manifests[1]
1095 t = self.file(f).read(m[f])
1095 t = self.file(f).read(m[f])
1096 self.wwrite(f, t, m.flags(f))
1096 self.wwrite(f, t, m.flags(f))
1097 self.dirstate.normal(f)
1097 self.dirstate.normal(f)
1098 finally:
1098 finally:
1099 wlock.release()
1099 wlock.release()
1100
1100
1101 def copy(self, source, dest):
1101 def copy(self, source, dest):
1102 p = self.wjoin(dest)
1102 p = self.wjoin(dest)
1103 if not (os.path.exists(p) or os.path.islink(p)):
1103 if not (os.path.exists(p) or os.path.islink(p)):
1104 self.ui.warn(_("%s does not exist!\n") % dest)
1104 self.ui.warn(_("%s does not exist!\n") % dest)
1105 elif not (os.path.isfile(p) or os.path.islink(p)):
1105 elif not (os.path.isfile(p) or os.path.islink(p)):
1106 self.ui.warn(_("copy failed: %s is not a file or a "
1106 self.ui.warn(_("copy failed: %s is not a file or a "
1107 "symbolic link\n") % dest)
1107 "symbolic link\n") % dest)
1108 else:
1108 else:
1109 wlock = self.wlock()
1109 wlock = self.wlock()
1110 try:
1110 try:
1111 if self.dirstate[dest] in '?r':
1111 if self.dirstate[dest] in '?r':
1112 self.dirstate.add(dest)
1112 self.dirstate.add(dest)
1113 self.dirstate.copy(source, dest)
1113 self.dirstate.copy(source, dest)
1114 finally:
1114 finally:
1115 wlock.release()
1115 wlock.release()
1116
1116
1117 def heads(self, start=None, closed=True):
1117 def heads(self, start=None, closed=True):
1118 heads = self.changelog.heads(start)
1118 heads = self.changelog.heads(start)
1119 def display(head):
1119 def display(head):
1120 if closed:
1120 if closed:
1121 return True
1121 return True
1122 extras = self.changelog.read(head)[5]
1122 extras = self.changelog.read(head)[5]
1123 return ('close' not in extras)
1123 return ('close' not in extras)
1124 # sort the output in rev descending order
1124 # sort the output in rev descending order
1125 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1125 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1126 return [n for (r, n) in sorted(heads)]
1126 return [n for (r, n) in sorted(heads)]
1127
1127
1128 def branchheads(self, branch=None, start=None, closed=True):
1128 def branchheads(self, branch=None, start=None, closed=True):
1129 if branch is None:
1129 if branch is None:
1130 branch = self[None].branch()
1130 branch = self[None].branch()
1131 branches = self.branchmap()
1131 branches = self.branchmap()
1132 if branch not in branches:
1132 if branch not in branches:
1133 return []
1133 return []
1134 bheads = branches[branch]
1134 bheads = branches[branch]
1135 # the cache returns heads ordered lowest to highest
1135 # the cache returns heads ordered lowest to highest
1136 bheads.reverse()
1136 bheads.reverse()
1137 if start is not None:
1137 if start is not None:
1138 # filter out the heads that cannot be reached from startrev
1138 # filter out the heads that cannot be reached from startrev
1139 bheads = self.changelog.nodesbetween([start], bheads)[2]
1139 bheads = self.changelog.nodesbetween([start], bheads)[2]
1140 if not closed:
1140 if not closed:
1141 bheads = [h for h in bheads if
1141 bheads = [h for h in bheads if
1142 ('close' not in self.changelog.read(h)[5])]
1142 ('close' not in self.changelog.read(h)[5])]
1143 return bheads
1143 return bheads
1144
1144
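# Editorial usage sketch, not part of the original file (reusing a repo
# object as in the sketches above): the open heads of the 'default' branch,
# newest first thanks to the reverse() above.
#
#     from mercurial.node import hex
#     for h in repo.branchheads('default', closed=False):
#         print hex(h)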
1145 def branches(self, nodes):
1145 def branches(self, nodes):
1146 if not nodes:
1146 if not nodes:
1147 nodes = [self.changelog.tip()]
1147 nodes = [self.changelog.tip()]
1148 b = []
1148 b = []
1149 for n in nodes:
1149 for n in nodes:
1150 t = n
1150 t = n
1151 while 1:
1151 while 1:
1152 p = self.changelog.parents(n)
1152 p = self.changelog.parents(n)
1153 if p[1] != nullid or p[0] == nullid:
1153 if p[1] != nullid or p[0] == nullid:
1154 b.append((t, n, p[0], p[1]))
1154 b.append((t, n, p[0], p[1]))
1155 break
1155 break
1156 n = p[0]
1156 n = p[0]
1157 return b
1157 return b
1158
1158
1159 def between(self, pairs):
1159 def between(self, pairs):
1160 r = []
1160 r = []
1161
1161
1162 for top, bottom in pairs:
1162 for top, bottom in pairs:
1163 n, l, i = top, [], 0
1163 n, l, i = top, [], 0
1164 f = 1
1164 f = 1
1165
1165
1166 while n != bottom and n != nullid:
1166 while n != bottom and n != nullid:
1167 p = self.changelog.parents(n)[0]
1167 p = self.changelog.parents(n)[0]
1168 if i == f:
1168 if i == f:
1169 l.append(n)
1169 l.append(n)
1170 f = f * 2
1170 f = f * 2
1171 n = p
1171 n = p
1172 i += 1
1172 i += 1
1173
1173
1174 r.append(l)
1174 r.append(l)
1175
1175
1176 return r
1176 return r
1177
1177
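# Editorial sketch, not part of the original file: the loop above keeps the
# nodes whose distance from top is 1, 2, 4, 8, ... which is what lets
# findcommonincoming below bisect a branch range in few round trips. The
# same pattern on a plain integer chain (a stand-in for first parents):
#
#     def sample(top, bottom):
#         n, l, i, f = top, [], 0, 1
#         while n != bottom:
#             p = n - 1              # stand-in for "first parent of n"
#             if i == f:
#                 l.append(n)
#                 f = f * 2
#             n = p
#             i += 1
#         return l
#
#     sample(100, 0)   # -> [99, 98, 96, 92, 84, 68, 36]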
1178 def findincoming(self, remote, base=None, heads=None, force=False):
1178 def findincoming(self, remote, base=None, heads=None, force=False):
1179 """Return list of roots of the subsets of missing nodes from remote
1179 """Return list of roots of the subsets of missing nodes from remote
1180
1180
1181 If base dict is specified, assume that these nodes and their parents
1181 If base dict is specified, assume that these nodes and their parents
1182 exist on the remote side and that no child of a node of base exists
1182 exist on the remote side and that no child of a node of base exists
1183 in both remote and self.
1183 in both remote and self.
1184 Furthermore base will be updated to include the nodes that exist
1184 Furthermore base will be updated to include the nodes that exist
1185 in self and remote but whose children do not exist in self and remote.
1185 in self and remote but whose children do not exist in self and remote.
1186 If a list of heads is specified, return only nodes which are heads
1186 If a list of heads is specified, return only nodes which are heads
1187 or ancestors of these heads.
1187 or ancestors of these heads.
1188
1188
1189 All the ancestors of base are in self and in remote.
1189 All the ancestors of base are in self and in remote.
1190 All the descendants of the list returned are missing in self.
1190 All the descendants of the list returned are missing in self.
1191 (and so we know that the rest of the nodes are missing in remote, see
1191 (and so we know that the rest of the nodes are missing in remote, see
1192 outgoing)
1192 outgoing)
1193 """
1193 """
1194 return self.findcommonincoming(remote, base, heads, force)[1]
1194 return self.findcommonincoming(remote, base, heads, force)[1]
1195
1195
1196 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1196 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1197 """Return a tuple (common, missing roots, heads) used to identify
1197 """Return a tuple (common, missing roots, heads) used to identify
1198 missing nodes from remote.
1198 missing nodes from remote.
1199
1199
1200 If base dict is specified, assume that these nodes and their parents
1200 If base dict is specified, assume that these nodes and their parents
1201 exist on the remote side and that no child of a node of base exists
1201 exist on the remote side and that no child of a node of base exists
1202 in both remote and self.
1202 in both remote and self.
1203 Furthermore base will be updated to include the nodes that exist
1203 Furthermore base will be updated to include the nodes that exist
1204 in self and remote but whose children do not exist in self and remote.
1204 in self and remote but whose children do not exist in self and remote.
1205 If a list of heads is specified, return only nodes which are heads
1205 If a list of heads is specified, return only nodes which are heads
1206 or ancestors of these heads.
1206 or ancestors of these heads.
1207
1207
1208 All the ancestors of base are in self and in remote.
1208 All the ancestors of base are in self and in remote.
1209 """
1209 """
1210 m = self.changelog.nodemap
1210 m = self.changelog.nodemap
1211 search = []
1211 search = []
1212 fetch = set()
1212 fetch = set()
1213 seen = set()
1213 seen = set()
1214 seenbranch = set()
1214 seenbranch = set()
1215 if base is None:
1215 if base is None:
1216 base = {}
1216 base = {}
1217
1217
1218 if not heads:
1218 if not heads:
1219 heads = remote.heads()
1219 heads = remote.heads()
1220
1220
1221 if self.changelog.tip() == nullid:
1221 if self.changelog.tip() == nullid:
1222 base[nullid] = 1
1222 base[nullid] = 1
1223 if heads != [nullid]:
1223 if heads != [nullid]:
1224 return [nullid], [nullid], list(heads)
1224 return [nullid], [nullid], list(heads)
1225 return [nullid], [], []
1225 return [nullid], [], []
1226
1226
1227 # assume we're closer to the tip than the root
1227 # assume we're closer to the tip than the root
1228 # and start by examining the heads
1228 # and start by examining the heads
1229 self.ui.status(_("searching for changes\n"))
1229 self.ui.status(_("searching for changes\n"))
1230
1230
1231 unknown = []
1231 unknown = []
1232 for h in heads:
1232 for h in heads:
1233 if h not in m:
1233 if h not in m:
1234 unknown.append(h)
1234 unknown.append(h)
1235 else:
1235 else:
1236 base[h] = 1
1236 base[h] = 1
1237
1237
1238 heads = unknown
1238 heads = unknown
1239 if not unknown:
1239 if not unknown:
1240 return base.keys(), [], []
1240 return base.keys(), [], []
1241
1241
1242 req = set(unknown)
1242 req = set(unknown)
1243 reqcnt = 0
1243 reqcnt = 0
1244
1244
1245 # search through remote branches
1245 # search through remote branches
1246 # a 'branch' here is a linear segment of history, with four parts:
1246 # a 'branch' here is a linear segment of history, with four parts:
1247 # head, root, first parent, second parent
1247 # head, root, first parent, second parent
1248 # (a branch always has two parents (or none) by definition)
1248 # (a branch always has two parents (or none) by definition)
1249 unknown = remote.branches(unknown)
1249 unknown = remote.branches(unknown)
1250 while unknown:
1250 while unknown:
1251 r = []
1251 r = []
1252 while unknown:
1252 while unknown:
1253 n = unknown.pop(0)
1253 n = unknown.pop(0)
1254 if n[0] in seen:
1254 if n[0] in seen:
1255 continue
1255 continue
1256
1256
1257 self.ui.debug(_("examining %s:%s\n")
1257 self.ui.debug(_("examining %s:%s\n")
1258 % (short(n[0]), short(n[1])))
1258 % (short(n[0]), short(n[1])))
1259 if n[0] == nullid: # found the end of the branch
1259 if n[0] == nullid: # found the end of the branch
1260 pass
1260 pass
1261 elif n in seenbranch:
1261 elif n in seenbranch:
1262 self.ui.debug(_("branch already found\n"))
1262 self.ui.debug(_("branch already found\n"))
1263 continue
1263 continue
1264 elif n[1] and n[1] in m: # do we know the base?
1264 elif n[1] and n[1] in m: # do we know the base?
1265 self.ui.debug(_("found incomplete branch %s:%s\n")
1265 self.ui.debug(_("found incomplete branch %s:%s\n")
1266 % (short(n[0]), short(n[1])))
1266 % (short(n[0]), short(n[1])))
1267 search.append(n[0:2]) # schedule branch range for scanning
1267 search.append(n[0:2]) # schedule branch range for scanning
1268 seenbranch.add(n)
1268 seenbranch.add(n)
1269 else:
1269 else:
1270 if n[1] not in seen and n[1] not in fetch:
1270 if n[1] not in seen and n[1] not in fetch:
1271 if n[2] in m and n[3] in m:
1271 if n[2] in m and n[3] in m:
1272 self.ui.debug(_("found new changeset %s\n") %
1272 self.ui.debug(_("found new changeset %s\n") %
1273 short(n[1]))
1273 short(n[1]))
1274 fetch.add(n[1]) # earliest unknown
1274 fetch.add(n[1]) # earliest unknown
1275 for p in n[2:4]:
1275 for p in n[2:4]:
1276 if p in m:
1276 if p in m:
1277 base[p] = 1 # latest known
1277 base[p] = 1 # latest known
1278
1278
1279 for p in n[2:4]:
1279 for p in n[2:4]:
1280 if p not in req and p not in m:
1280 if p not in req and p not in m:
1281 r.append(p)
1281 r.append(p)
1282 req.add(p)
1282 req.add(p)
1283 seen.add(n[0])
1283 seen.add(n[0])
1284
1284
1285 if r:
1285 if r:
1286 reqcnt += 1
1286 reqcnt += 1
1287 self.ui.debug(_("request %d: %s\n") %
1287 self.ui.debug(_("request %d: %s\n") %
1288 (reqcnt, " ".join(map(short, r))))
1288 (reqcnt, " ".join(map(short, r))))
1289 for p in xrange(0, len(r), 10):
1289 for p in xrange(0, len(r), 10):
1290 for b in remote.branches(r[p:p+10]):
1290 for b in remote.branches(r[p:p+10]):
1291 self.ui.debug(_("received %s:%s\n") %
1291 self.ui.debug(_("received %s:%s\n") %
1292 (short(b[0]), short(b[1])))
1292 (short(b[0]), short(b[1])))
1293 unknown.append(b)
1293 unknown.append(b)
1294
1294
1295 # do binary search on the branches we found
1295 # do binary search on the branches we found
1296 while search:
1296 while search:
1297 newsearch = []
1297 newsearch = []
1298 reqcnt += 1
1298 reqcnt += 1
1299 for n, l in zip(search, remote.between(search)):
1299 for n, l in zip(search, remote.between(search)):
1300 l.append(n[1])
1300 l.append(n[1])
1301 p = n[0]
1301 p = n[0]
1302 f = 1
1302 f = 1
1303 for i in l:
1303 for i in l:
1304 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1304 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1305 if i in m:
1305 if i in m:
1306 if f <= 2:
1306 if f <= 2:
1307 self.ui.debug(_("found new branch changeset %s\n") %
1307 self.ui.debug(_("found new branch changeset %s\n") %
1308 short(p))
1308 short(p))
1309 fetch.add(p)
1309 fetch.add(p)
1310 base[i] = 1
1310 base[i] = 1
1311 else:
1311 else:
1312 self.ui.debug(_("narrowed branch search to %s:%s\n")
1312 self.ui.debug(_("narrowed branch search to %s:%s\n")
1313 % (short(p), short(i)))
1313 % (short(p), short(i)))
1314 newsearch.append((p, i))
1314 newsearch.append((p, i))
1315 break
1315 break
1316 p, f = i, f * 2
1316 p, f = i, f * 2
1317 search = newsearch
1317 search = newsearch
1318
1318
1319 # sanity check our fetch list
1319 # sanity check our fetch list
1320 for f in fetch:
1320 for f in fetch:
1321 if f in m:
1321 if f in m:
1322 raise error.RepoError(_("already have changeset ")
1322 raise error.RepoError(_("already have changeset ")
1323 + short(f[:4]))
1323 + short(f[:4]))
1324
1324
1325 if base.keys() == [nullid]:
1325 if base.keys() == [nullid]:
1326 if force:
1326 if force:
1327 self.ui.warn(_("warning: repository is unrelated\n"))
1327 self.ui.warn(_("warning: repository is unrelated\n"))
1328 else:
1328 else:
1329 raise util.Abort(_("repository is unrelated"))
1329 raise util.Abort(_("repository is unrelated"))
1330
1330
1331 self.ui.debug(_("found new changesets starting at ") +
1331 self.ui.debug(_("found new changesets starting at ") +
1332 " ".join([short(f) for f in fetch]) + "\n")
1332 " ".join([short(f) for f in fetch]) + "\n")
1333
1333
1334 self.ui.debug(_("%d total queries\n") % reqcnt)
1334 self.ui.debug(_("%d total queries\n") % reqcnt)
1335
1335
1336 return base.keys(), list(fetch), heads
1336 return base.keys(), list(fetch), heads
1337
1337
1338 def findoutgoing(self, remote, base=None, heads=None, force=False):
1338 def findoutgoing(self, remote, base=None, heads=None, force=False):
1339 """Return list of nodes that are roots of subsets not in remote
1339 """Return list of nodes that are roots of subsets not in remote
1340
1340
1341 If base dict is specified, assume that these nodes and their parents
1341 If base dict is specified, assume that these nodes and their parents
1342 exist on the remote side.
1342 exist on the remote side.
1343 If a list of heads is specified, return only nodes which are heads
1343 If a list of heads is specified, return only nodes which are heads
1344 or ancestors of these heads, and return a second element which
1344 or ancestors of these heads, and return a second element which
1345 contains all remote heads which get new children.
1345 contains all remote heads which get new children.
1346 """
1346 """
1347 if base is None:
1347 if base is None:
1348 base = {}
1348 base = {}
1349 self.findincoming(remote, base, heads, force=force)
1349 self.findincoming(remote, base, heads, force=force)
1350
1350
1351 self.ui.debug(_("common changesets up to ")
1351 self.ui.debug(_("common changesets up to ")
1352 + " ".join(map(short, base.keys())) + "\n")
1352 + " ".join(map(short, base.keys())) + "\n")
1353
1353
1354 remain = set(self.changelog.nodemap)
1354 remain = set(self.changelog.nodemap)
1355
1355
1356 # prune everything remote has from the tree
1356 # prune everything remote has from the tree
1357 remain.remove(nullid)
1357 remain.remove(nullid)
1358 remove = base.keys()
1358 remove = base.keys()
1359 while remove:
1359 while remove:
1360 n = remove.pop(0)
1360 n = remove.pop(0)
1361 if n in remain:
1361 if n in remain:
1362 remain.remove(n)
1362 remain.remove(n)
1363 for p in self.changelog.parents(n):
1363 for p in self.changelog.parents(n):
1364 remove.append(p)
1364 remove.append(p)
1365
1365
1366 # find every node whose parents have been pruned
1366 # find every node whose parents have been pruned
1367 subset = []
1367 subset = []
1368 # find every remote head that will get new children
1368 # find every remote head that will get new children
1369 updated_heads = set()
1369 updated_heads = set()
1370 for n in remain:
1370 for n in remain:
1371 p1, p2 = self.changelog.parents(n)
1371 p1, p2 = self.changelog.parents(n)
1372 if p1 not in remain and p2 not in remain:
1372 if p1 not in remain and p2 not in remain:
1373 subset.append(n)
1373 subset.append(n)
1374 if heads:
1374 if heads:
1375 if p1 in heads:
1375 if p1 in heads:
1376 updated_heads.add(p1)
1376 updated_heads.add(p1)
1377 if p2 in heads:
1377 if p2 in heads:
1378 updated_heads.add(p2)
1378 updated_heads.add(p2)
1379
1379
1380 # this is the set of all roots we have to push
1380 # this is the set of all roots we have to push
1381 if heads:
1381 if heads:
1382 return subset, list(updated_heads)
1382 return subset, list(updated_heads)
1383 else:
1383 else:
1384 return subset
1384 return subset
1385
1385
1386 def pull(self, remote, heads=None, force=False):
1386 def pull(self, remote, heads=None, force=False):
1387 lock = self.lock()
1387 lock = self.lock()
1388 try:
1388 try:
1389 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1389 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1390 force=force)
1390 force=force)
1391 if fetch == [nullid]:
1391 if fetch == [nullid]:
1392 self.ui.status(_("requesting all changes\n"))
1392 self.ui.status(_("requesting all changes\n"))
1393
1393
1394 if not fetch:
1394 if not fetch:
1395 self.ui.status(_("no changes found\n"))
1395 self.ui.status(_("no changes found\n"))
1396 return 0
1396 return 0
1397
1397
1398 if heads is None and remote.capable('changegroupsubset'):
1398 if heads is None and remote.capable('changegroupsubset'):
1399 heads = rheads
1399 heads = rheads
1400
1400
1401 if heads is None:
1401 if heads is None:
1402 cg = remote.changegroup(fetch, 'pull')
1402 cg = remote.changegroup(fetch, 'pull')
1403 else:
1403 else:
1404 if not remote.capable('changegroupsubset'):
1404 if not remote.capable('changegroupsubset'):
1405 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1405 raise util.Abort(_("Partial pull cannot be done because "
1406 "other repository doesn't support "
1407 "changegroupsubset."))
1406 cg = remote.changegroupsubset(fetch, heads, 'pull')
1408 cg = remote.changegroupsubset(fetch, heads, 'pull')
1407 return self.addchangegroup(cg, 'pull', remote.url())
1409 return self.addchangegroup(cg, 'pull', remote.url())
1408 finally:
1410 finally:
1409 lock.release()
1411 lock.release()
1410
1412
1411 def push(self, remote, force=False, revs=None):
1413 def push(self, remote, force=False, revs=None):
1412 # there are two ways to push to remote repo:
1414 # there are two ways to push to remote repo:
1413 #
1415 #
1414 # addchangegroup assumes local user can lock remote
1416 # addchangegroup assumes local user can lock remote
1415 # repo (local filesystem, old ssh servers).
1417 # repo (local filesystem, old ssh servers).
1416 #
1418 #
1417 # unbundle assumes local user cannot lock remote repo (new ssh
1419 # unbundle assumes local user cannot lock remote repo (new ssh
1418 # servers, http servers).
1420 # servers, http servers).
1419
1421
1420 if remote.capable('unbundle'):
1422 if remote.capable('unbundle'):
1421 return self.push_unbundle(remote, force, revs)
1423 return self.push_unbundle(remote, force, revs)
1422 return self.push_addchangegroup(remote, force, revs)
1424 return self.push_addchangegroup(remote, force, revs)
1423
1425
1424 def prepush(self, remote, force, revs):
1426 def prepush(self, remote, force, revs):
1425 common = {}
1427 common = {}
1426 remote_heads = remote.heads()
1428 remote_heads = remote.heads()
1427 inc = self.findincoming(remote, common, remote_heads, force=force)
1429 inc = self.findincoming(remote, common, remote_heads, force=force)
1428
1430
1429 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1431 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1430 if revs is not None:
1432 if revs is not None:
1431 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1433 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1432 else:
1434 else:
1433 bases, heads = update, self.changelog.heads()
1435 bases, heads = update, self.changelog.heads()
1434
1436
1435 def checkbranch(lheads, rheads, updatelh):
1437 def checkbranch(lheads, rheads, updatelh):
1436 '''
1438 '''
1437 check whether there are more local heads than remote heads on
1439 check whether there are more local heads than remote heads on
1438 a specific branch.
1440 a specific branch.
1439
1441
1440 lheads: local branch heads
1442 lheads: local branch heads
1441 rheads: remote branch heads
1443 rheads: remote branch heads
1442 updatelh: outgoing local branch heads
1444 updatelh: outgoing local branch heads
1443 '''
1445 '''
1444
1446
1445 warn = 0
1447 warn = 0
1446
1448
1447 if not revs and len(lheads) > len(rheads):
1449 if not revs and len(lheads) > len(rheads):
1448 warn = 1
1450 warn = 1
1449 else:
1451 else:
1450 updatelheads = [self.changelog.heads(x, lheads)
1452 updatelheads = [self.changelog.heads(x, lheads)
1451 for x in updatelh]
1453 for x in updatelh]
1452 newheads = set(sum(updatelheads, [])) & set(lheads)
1454 newheads = set(sum(updatelheads, [])) & set(lheads)
1453
1455
1454 if not newheads:
1456 if not newheads:
1455 return True
1457 return True
1456
1458
1457 for r in rheads:
1459 for r in rheads:
1458 if r in self.changelog.nodemap:
1460 if r in self.changelog.nodemap:
1459 desc = self.changelog.heads(r, heads)
1461 desc = self.changelog.heads(r, heads)
1460 l = [h for h in heads if h in desc]
1462 l = [h for h in heads if h in desc]
1461 if not l:
1463 if not l:
1462 newheads.add(r)
1464 newheads.add(r)
1463 else:
1465 else:
1464 newheads.add(r)
1466 newheads.add(r)
1465 if len(newheads) > len(rheads):
1467 if len(newheads) > len(rheads):
1466 warn = 1
1468 warn = 1
1467
1469
1468 if warn:
1470 if warn:
1469 if not rheads: # new branch requires --force
1471 if not rheads: # new branch requires --force
1470 self.ui.warn(_("abort: push creates new"
1472 self.ui.warn(_("abort: push creates new"
1471 " remote branch '%s'!\n") %
1473 " remote branch '%s'!\n") %
1472 self[updatelh[0]].branch())
1474 self[updatelh[0]].branch())
1473 else:
1475 else:
1474 self.ui.warn(_("abort: push creates new remote heads!\n"))
1476 self.ui.warn(_("abort: push creates new remote heads!\n"))
1475
1477
1476 self.ui.status(_("(did you forget to merge?"
1478 self.ui.status(_("(did you forget to merge?"
1477 " use push -f to force)\n"))
1479 " use push -f to force)\n"))
1478 return False
1480 return False
1479 return True
1481 return True
1480
1482
1481 if not bases:
1483 if not bases:
1482 self.ui.status(_("no changes found\n"))
1484 self.ui.status(_("no changes found\n"))
1483 return None, 1
1485 return None, 1
1484 elif not force:
1486 elif not force:
1485 # Check for each named branch if we're creating new remote heads.
1487 # Check for each named branch if we're creating new remote heads.
1486 # To be a remote head after push, node must be either:
1488 # To be a remote head after push, node must be either:
1487 # - unknown locally
1489 # - unknown locally
1488 # - a local outgoing head descended from update
1490 # - a local outgoing head descended from update
1489 # - a remote head that's known locally and not
1491 # - a remote head that's known locally and not
1490 # ancestral to an outgoing head
1492 # ancestral to an outgoing head
1491 #
1493 #
1492 # New named branches cannot be created without --force.
1494 # New named branches cannot be created without --force.
1493
1495
1494 if remote_heads != [nullid]:
1496 if remote_heads != [nullid]:
1495 if remote.capable('branchmap'):
1497 if remote.capable('branchmap'):
1496 localhds = {}
1498 localhds = {}
1497 if not revs:
1499 if not revs:
1498 localhds = self.branchmap()
1500 localhds = self.branchmap()
1499 else:
1501 else:
1500 for n in heads:
1502 for n in heads:
1501 branch = self[n].branch()
1503 branch = self[n].branch()
1502 if branch in localhds:
1504 if branch in localhds:
1503 localhds[branch].append(n)
1505 localhds[branch].append(n)
1504 else:
1506 else:
1505 localhds[branch] = [n]
1507 localhds[branch] = [n]
1506
1508
1507 remotehds = remote.branchmap()
1509 remotehds = remote.branchmap()
1508
1510
1509 for lh in localhds:
1511 for lh in localhds:
1510 if lh in remotehds:
1512 if lh in remotehds:
1511 rheads = remotehds[lh]
1513 rheads = remotehds[lh]
1512 else:
1514 else:
1513 rheads = []
1515 rheads = []
1514 lheads = localhds[lh]
1516 lheads = localhds[lh]
1515 updatelh = [upd for upd in update
1517 updatelh = [upd for upd in update
1516 if self[upd].branch() == lh]
1518 if self[upd].branch() == lh]
1517 if not updatelh:
1519 if not updatelh:
1518 continue
1520 continue
1519 if not checkbranch(lheads, rheads, updatelh):
1521 if not checkbranch(lheads, rheads, updatelh):
1520 return None, 0
1522 return None, 0
1521 else:
1523 else:
1522 if not checkbranch(heads, remote_heads, update):
1524 if not checkbranch(heads, remote_heads, update):
1523 return None, 0
1525 return None, 0
1524
1526
1525 if inc:
1527 if inc:
1526 self.ui.warn(_("note: unsynced remote changes!\n"))
1528 self.ui.warn(_("note: unsynced remote changes!\n"))
1527
1529
1528
1530
1529 if revs is None:
1531 if revs is None:
1530 # use the fast path, no race possible on push
1532 # use the fast path, no race possible on push
1531 cg = self._changegroup(common.keys(), 'push')
1533 cg = self._changegroup(common.keys(), 'push')
1532 else:
1534 else:
1533 cg = self.changegroupsubset(update, revs, 'push')
1535 cg = self.changegroupsubset(update, revs, 'push')
1534 return cg, remote_heads
1536 return cg, remote_heads
1535
1537
1536 def push_addchangegroup(self, remote, force, revs):
1538 def push_addchangegroup(self, remote, force, revs):
1537 lock = remote.lock()
1539 lock = remote.lock()
1538 try:
1540 try:
1539 ret = self.prepush(remote, force, revs)
1541 ret = self.prepush(remote, force, revs)
1540 if ret[0] is not None:
1542 if ret[0] is not None:
1541 cg, remote_heads = ret
1543 cg, remote_heads = ret
1542 return remote.addchangegroup(cg, 'push', self.url())
1544 return remote.addchangegroup(cg, 'push', self.url())
1543 return ret[1]
1545 return ret[1]
1544 finally:
1546 finally:
1545 lock.release()
1547 lock.release()
1546
1548
1547 def push_unbundle(self, remote, force, revs):
1549 def push_unbundle(self, remote, force, revs):
1548 # local repo finds heads on server, finds out what revs it
1550 # local repo finds heads on server, finds out what revs it
1549 # must push. once revs transferred, if server finds it has
1551 # must push. once revs transferred, if server finds it has
1550 # different heads (someone else won commit/push race), server
1552 # different heads (someone else won commit/push race), server
1551 # aborts.
1553 # aborts.
1552
1554
1553 ret = self.prepush(remote, force, revs)
1555 ret = self.prepush(remote, force, revs)
1554 if ret[0] is not None:
1556 if ret[0] is not None:
1555 cg, remote_heads = ret
1557 cg, remote_heads = ret
1556 if force: remote_heads = ['force']
1558 if force: remote_heads = ['force']
1557 return remote.unbundle(cg, remote_heads, 'push')
1559 return remote.unbundle(cg, remote_heads, 'push')
1558 return ret[1]
1560 return ret[1]
1559
1561
1560 def changegroupinfo(self, nodes, source):
1562 def changegroupinfo(self, nodes, source):
1561 if self.ui.verbose or source == 'bundle':
1563 if self.ui.verbose or source == 'bundle':
1562 self.ui.status(_("%d changesets found\n") % len(nodes))
1564 self.ui.status(_("%d changesets found\n") % len(nodes))
1563 if self.ui.debugflag:
1565 if self.ui.debugflag:
1564 self.ui.debug(_("list of changesets:\n"))
1566 self.ui.debug(_("list of changesets:\n"))
1565 for node in nodes:
1567 for node in nodes:
1566 self.ui.debug("%s\n" % hex(node))
1568 self.ui.debug("%s\n" % hex(node))
1567
1569
1568 def changegroupsubset(self, bases, heads, source, extranodes=None):
1570 def changegroupsubset(self, bases, heads, source, extranodes=None):
1569 """This function generates a changegroup consisting of all the nodes
1571 """This function generates a changegroup consisting of all the nodes
1570 that are descendants of any of the bases, and ancestors of any of
1572 that are descendants of any of the bases, and ancestors of any of
1571 the heads.
1573 the heads.
1572
1574
1573 It is fairly complex as determining which filenodes and which
1575 It is fairly complex as determining which filenodes and which
1574 manifest nodes need to be included for the changeset to be complete
1576 manifest nodes need to be included for the changeset to be complete
1575 is non-trivial.
1577 is non-trivial.
1576
1578
1577 Another wrinkle is doing the reverse, figuring out which changeset in
1579 Another wrinkle is doing the reverse, figuring out which changeset in
1578 the changegroup a particular filenode or manifestnode belongs to.
1580 the changegroup a particular filenode or manifestnode belongs to.
1579
1581
1580 The caller can specify some nodes that must be included in the
1582 The caller can specify some nodes that must be included in the
1581 changegroup using the extranodes argument. It should be a dict
1583 changegroup using the extranodes argument. It should be a dict
1582 where the keys are the filenames (or 1 for the manifest), and the
1584 where the keys are the filenames (or 1 for the manifest), and the
1583 values are lists of (node, linknode) tuples, where node is a wanted
1585 values are lists of (node, linknode) tuples, where node is a wanted
1584 node and linknode is the changelog node that should be transmitted as
1586 node and linknode is the changelog node that should be transmitted as
1585 the linkrev.
1587 the linkrev.
1586 """
1588 """
1587
1589
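# Editorial sketch, not part of the original file (names hypothetical), of
# the extranodes mapping described in the docstring above:
#
#     extranodes = {
#         'foo.txt': [(filenode, linknode)],      # extra filelog nodes
#         1:         [(manifestnode, linknode)],  # key 1 means the manifest
#     }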
1588 if extranodes is None:
1590 if extranodes is None:
1589 # can we go through the fast path ?
1591 # can we go through the fast path ?
1590 heads.sort()
1592 heads.sort()
1591 allheads = self.heads()
1593 allheads = self.heads()
1592 allheads.sort()
1594 allheads.sort()
1593 if heads == allheads:
1595 if heads == allheads:
1594 common = []
1596 common = []
1595 # parents of bases are known from both sides
1597 # parents of bases are known from both sides
1596 for n in bases:
1598 for n in bases:
1597 for p in self.changelog.parents(n):
1599 for p in self.changelog.parents(n):
1598 if p != nullid:
1600 if p != nullid:
1599 common.append(p)
1601 common.append(p)
1600 return self._changegroup(common, source)
1602 return self._changegroup(common, source)
1601
1603
1602 self.hook('preoutgoing', throw=True, source=source)
1604 self.hook('preoutgoing', throw=True, source=source)
1603
1605
1604 # Set up some initial variables
1606 # Set up some initial variables
1605 # Make it easy to refer to self.changelog
1607 # Make it easy to refer to self.changelog
1606 cl = self.changelog
1608 cl = self.changelog
1607 # msng is short for missing - compute the list of changesets in this
1609 # msng is short for missing - compute the list of changesets in this
1608 # changegroup.
1610 # changegroup.
1609 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1611 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1610 self.changegroupinfo(msng_cl_lst, source)
1612 self.changegroupinfo(msng_cl_lst, source)
1611 # Some bases may turn out to be superfluous, and some heads may be
1613 # Some bases may turn out to be superfluous, and some heads may be
1612 # too. nodesbetween will return the minimal set of bases and heads
1614 # too. nodesbetween will return the minimal set of bases and heads
1613 # necessary to re-create the changegroup.
1615 # necessary to re-create the changegroup.
1614
1616
1615 # Known heads are the list of heads that it is assumed the recipient
1617 # Known heads are the list of heads that it is assumed the recipient
1616 # of this changegroup will know about.
1618 # of this changegroup will know about.
1617 knownheads = set()
1619 knownheads = set()
1618 # We assume that all parents of bases are known heads.
1620 # We assume that all parents of bases are known heads.
1619 for n in bases:
1621 for n in bases:
1620 knownheads.update(cl.parents(n))
1622 knownheads.update(cl.parents(n))
1621 knownheads.discard(nullid)
1623 knownheads.discard(nullid)
1622 knownheads = list(knownheads)
1624 knownheads = list(knownheads)
1623 if knownheads:
1625 if knownheads:
1624 # Now that we know what heads are known, we can compute which
1626 # Now that we know what heads are known, we can compute which
1625 # changesets are known. The recipient must know about all
1627 # changesets are known. The recipient must know about all
1626 # changesets required to reach the known heads from the null
1628 # changesets required to reach the known heads from the null
1627 # changeset.
1629 # changeset.
1628 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1630 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1629 junk = None
1631 junk = None
1630 # Transform the list into a set.
1632 # Transform the list into a set.
1631 has_cl_set = set(has_cl_set)
1633 has_cl_set = set(has_cl_set)
1632 else:
1634 else:
1633 # If there were no known heads, the recipient cannot be assumed to
1635 # If there were no known heads, the recipient cannot be assumed to
1634 # know about any changesets.
1636 # know about any changesets.
1635 has_cl_set = set()
1637 has_cl_set = set()
1636
1638
1637 # Make it easy to refer to self.manifest
1639 # Make it easy to refer to self.manifest
1638 mnfst = self.manifest
1640 mnfst = self.manifest
1639 # We don't know which manifests are missing yet
1641 # We don't know which manifests are missing yet
1640 msng_mnfst_set = {}
1642 msng_mnfst_set = {}
1641 # Nor do we know which filenodes are missing.
1643 # Nor do we know which filenodes are missing.
1642 msng_filenode_set = {}
1644 msng_filenode_set = {}
1643
1645
1644 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1646 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1645 junk = None
1647 junk = None
1646
1648
1647 # A changeset always belongs to itself, so the changenode lookup
1649 # A changeset always belongs to itself, so the changenode lookup
1648 # function for a changenode is identity.
1650 # function for a changenode is identity.
1649 def identity(x):
1651 def identity(x):
1650 return x
1652 return x
1651
1653
1652 # A function generating function. Sets up an environment for the
1654 # A function generating function. Sets up an environment for the
1653 # inner function.
1655 # inner function.
1654 def cmp_by_rev_func(revlog):
1656 def cmp_by_rev_func(revlog):
1655 # Compare two nodes by their revision number in the environment's
1657 # Compare two nodes by their revision number in the environment's
1656 # revision history. Since the revision number both represents the
1658 # revision history. Since the revision number both represents the
1657 # most efficient order to read the nodes in, and represents a
1659 # most efficient order to read the nodes in, and represents a
1658 # topological sorting of the nodes, this function is often useful.
1660 # topological sorting of the nodes, this function is often useful.
1659 def cmp_by_rev(a, b):
1661 def cmp_by_rev(a, b):
1660 return cmp(revlog.rev(a), revlog.rev(b))
1662 return cmp(revlog.rev(a), revlog.rev(b))
1661 return cmp_by_rev
1663 return cmp_by_rev
1662
1664
1663 # If we determine that a particular file or manifest node must be a
1665 # If we determine that a particular file or manifest node must be a
1664 # node that the recipient of the changegroup will already have, we can
1666 # node that the recipient of the changegroup will already have, we can
1665 # also assume the recipient will have all the parents. This function
1667 # also assume the recipient will have all the parents. This function
1666 # prunes them from the set of missing nodes.
1668 # prunes them from the set of missing nodes.
1667 def prune_parents(revlog, hasset, msngset):
1669 def prune_parents(revlog, hasset, msngset):
1668 haslst = list(hasset)
1670 haslst = list(hasset)
1669 haslst.sort(cmp_by_rev_func(revlog))
1671 haslst.sort(cmp_by_rev_func(revlog))
1670 for node in haslst:
1672 for node in haslst:
1671 parentlst = [p for p in revlog.parents(node) if p != nullid]
1673 parentlst = [p for p in revlog.parents(node) if p != nullid]
1672 while parentlst:
1674 while parentlst:
1673 n = parentlst.pop()
1675 n = parentlst.pop()
1674 if n not in hasset:
1676 if n not in hasset:
1675 hasset.add(n)
1677 hasset.add(n)
1676 p = [p for p in revlog.parents(n) if p != nullid]
1678 p = [p for p in revlog.parents(n) if p != nullid]
1677 parentlst.extend(p)
1679 parentlst.extend(p)
1678 for n in hasset:
1680 for n in hasset:
1679 msngset.pop(n, None)
1681 msngset.pop(n, None)
1680
1682
1681 # This is a function generating function used to set up an environment
1683 # This is a function generating function used to set up an environment
1682 # for the inner function to execute in.
1684 # for the inner function to execute in.
1683 def manifest_and_file_collector(changedfileset):
1685 def manifest_and_file_collector(changedfileset):
1684 # This is an information gathering function that gathers
1686 # This is an information gathering function that gathers
1685 # information from each changeset node that goes out as part of
1687 # information from each changeset node that goes out as part of
1686 # the changegroup. The information gathered is a list of which
1688 # the changegroup. The information gathered is a list of which
1687 # manifest nodes are potentially required (the recipient may
1689 # manifest nodes are potentially required (the recipient may
1688 # already have them) and total list of all files which were
1690 # already have them) and total list of all files which were
1689 # changed in any changeset in the changegroup.
1691 # changed in any changeset in the changegroup.
1690 #
1692 #
1691 # We also remember the first changenode we saw any manifest
1693 # We also remember the first changenode we saw any manifest
1692 # referenced by so we can later determine which changenode 'owns'
1694 # referenced by so we can later determine which changenode 'owns'
1693 # the manifest.
1695 # the manifest.
1694 def collect_manifests_and_files(clnode):
1696 def collect_manifests_and_files(clnode):
1695 c = cl.read(clnode)
1697 c = cl.read(clnode)
1696 for f in c[3]:
1698 for f in c[3]:
1697 # This is to make sure we only have one instance of each
1699 # This is to make sure we only have one instance of each
1698 # filename string for each filename.
1700 # filename string for each filename.
1699 changedfileset.setdefault(f, f)
1701 changedfileset.setdefault(f, f)
1700 msng_mnfst_set.setdefault(c[0], clnode)
1702 msng_mnfst_set.setdefault(c[0], clnode)
1701 return collect_manifests_and_files
1703 return collect_manifests_and_files
1702
1704
1703 # Figure out which manifest nodes (of the ones we think might be part
1705 # Figure out which manifest nodes (of the ones we think might be part
1704 # of the changegroup) the recipient must know about and remove them
1706 # of the changegroup) the recipient must know about and remove them
1705 # from the changegroup.
1707 # from the changegroup.
1706 def prune_manifests():
1708 def prune_manifests():
1707 has_mnfst_set = set()
1709 has_mnfst_set = set()
1708 for n in msng_mnfst_set:
1710 for n in msng_mnfst_set:
1709 # If a 'missing' manifest thinks it belongs to a changenode
1711 # If a 'missing' manifest thinks it belongs to a changenode
1710 # the recipient is assumed to have, obviously the recipient
1712 # the recipient is assumed to have, obviously the recipient
1711 # must have that manifest.
1713 # must have that manifest.
1712 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1714 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1713 if linknode in has_cl_set:
1715 if linknode in has_cl_set:
1714 has_mnfst_set.add(n)
1716 has_mnfst_set.add(n)
1715 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1717 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1716
1718
1717 # Use the information collected in collect_manifests_and_files to say
1719 # Use the information collected in collect_manifests_and_files to say
1718 # which changenode any manifestnode belongs to.
1720 # which changenode any manifestnode belongs to.
1719 def lookup_manifest_link(mnfstnode):
1721 def lookup_manifest_link(mnfstnode):
1720 return msng_mnfst_set[mnfstnode]
1722 return msng_mnfst_set[mnfstnode]
1721
1723
1722 # A function generating function that sets up the initial environment
1724 # A function generating function that sets up the initial environment
1723 # for the inner function.
1725 # for the inner function.
1724 def filenode_collector(changedfiles):
1726 def filenode_collector(changedfiles):
1725 next_rev = [0]
1727 next_rev = [0]
1726 # This gathers information from each manifestnode included in the
1728 # This gathers information from each manifestnode included in the
1727 # changegroup about which filenodes the manifest node references
1729 # changegroup about which filenodes the manifest node references
1728 # so we can include those in the changegroup too.
1730 # so we can include those in the changegroup too.
1729 #
1731 #
1730 # It also remembers which changenode each filenode belongs to. It
1732 # It also remembers which changenode each filenode belongs to. It
1731 # does this by assuming that a filenode belongs to the changenode
1733 # does this by assuming that a filenode belongs to the changenode
1732 # the first manifest that references it belongs to.
1734 # the first manifest that references it belongs to.
1733 def collect_msng_filenodes(mnfstnode):
1735 def collect_msng_filenodes(mnfstnode):
1734 r = mnfst.rev(mnfstnode)
1736 r = mnfst.rev(mnfstnode)
1735 if r == next_rev[0]:
1737 if r == next_rev[0]:
1736 # If the last rev we looked at was the one just previous,
1738 # If the last rev we looked at was the one just previous,
1737 # we only need to see a diff.
1739 # we only need to see a diff.
1738 deltamf = mnfst.readdelta(mnfstnode)
1740 deltamf = mnfst.readdelta(mnfstnode)
1739 # For each line in the delta
1741 # For each line in the delta
1740 for f, fnode in deltamf.iteritems():
1742 for f, fnode in deltamf.iteritems():
1741 f = changedfiles.get(f, None)
1743 f = changedfiles.get(f, None)
1742 # And if the file is in the list of files we care
1744 # And if the file is in the list of files we care
1743 # about.
1745 # about.
1744 if f is not None:
1746 if f is not None:
1745 # Get the changenode this manifest belongs to
1747 # Get the changenode this manifest belongs to
1746 clnode = msng_mnfst_set[mnfstnode]
1748 clnode = msng_mnfst_set[mnfstnode]
1747 # Create the set of filenodes for the file if
1749 # Create the set of filenodes for the file if
1748 # there isn't one already.
1750 # there isn't one already.
1749 ndset = msng_filenode_set.setdefault(f, {})
1751 ndset = msng_filenode_set.setdefault(f, {})
1750 # And set the filenode's changelog node to the
1752 # And set the filenode's changelog node to the
1751 # manifest's if it hasn't been set already.
1753 # manifest's if it hasn't been set already.
1752 ndset.setdefault(fnode, clnode)
1754 ndset.setdefault(fnode, clnode)
1753 else:
1755 else:
1754 # Otherwise we need a full manifest.
1756 # Otherwise we need a full manifest.
1755 m = mnfst.read(mnfstnode)
1757 m = mnfst.read(mnfstnode)
1756 # For every file we care about.
1758 # For every file we care about.
1757 for f in changedfiles:
1759 for f in changedfiles:
1758 fnode = m.get(f, None)
1760 fnode = m.get(f, None)
1759 # If it's in the manifest
1761 # If it's in the manifest
1760 if fnode is not None:
1762 if fnode is not None:
1761 # See comments above.
1763 # See comments above.
1762 clnode = msng_mnfst_set[mnfstnode]
1764 clnode = msng_mnfst_set[mnfstnode]
1763 ndset = msng_filenode_set.setdefault(f, {})
1765 ndset = msng_filenode_set.setdefault(f, {})
1764 ndset.setdefault(fnode, clnode)
1766 ndset.setdefault(fnode, clnode)
1765 # Remember the revision we hope to see next.
1767 # Remember the revision we hope to see next.
1766 next_rev[0] = r + 1
1768 next_rev[0] = r + 1
1767 return collect_msng_filenodes
1769 return collect_msng_filenodes
1768
1770
1769 # We have a list of filenodes we think we need for a file, let's remove
1771 # We have a list of filenodes we think we need for a file, let's remove
1770 # all those we know the recipient must have.
1772 # all those we know the recipient must have.
1771 def prune_filenodes(f, filerevlog):
1773 def prune_filenodes(f, filerevlog):
1772 msngset = msng_filenode_set[f]
1774 msngset = msng_filenode_set[f]
1773 hasset = set()
1775 hasset = set()
1774 # If a 'missing' filenode thinks it belongs to a changenode we
1776 # If a 'missing' filenode thinks it belongs to a changenode we
1775 # assume the recipient must have, then the recipient must have
1777 # assume the recipient must have, then the recipient must have
1776 # that filenode.
1778 # that filenode.
1777 for n in msngset:
1779 for n in msngset:
1778 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1780 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1779 if clnode in has_cl_set:
1781 if clnode in has_cl_set:
1780 hasset.add(n)
1782 hasset.add(n)
1781 prune_parents(filerevlog, hasset, msngset)
1783 prune_parents(filerevlog, hasset, msngset)
1782
1784
1783 # A function generator function that sets up a context for the
1785 # A function generator function that sets up a context for the
1784 # inner function.
1786 # inner function.
1785 def lookup_filenode_link_func(fname):
1787 def lookup_filenode_link_func(fname):
1786 msngset = msng_filenode_set[fname]
1788 msngset = msng_filenode_set[fname]
1787 # Lookup the changenode the filenode belongs to.
1789 # Lookup the changenode the filenode belongs to.
1788 def lookup_filenode_link(fnode):
1790 def lookup_filenode_link(fnode):
1789 return msngset[fnode]
1791 return msngset[fnode]
1790 return lookup_filenode_link
1792 return lookup_filenode_link
1791
1793
1792 # Add the nodes that were explicitly requested.
1794 # Add the nodes that were explicitly requested.
1793 def add_extra_nodes(name, nodes):
1795 def add_extra_nodes(name, nodes):
1794 if not extranodes or name not in extranodes:
1796 if not extranodes or name not in extranodes:
1795 return
1797 return
1796
1798
1797 for node, linknode in extranodes[name]:
1799 for node, linknode in extranodes[name]:
1798 if node not in nodes:
1800 if node not in nodes:
1799 nodes[node] = linknode
1801 nodes[node] = linknode
1800
1802
1801 # Now that we have all these utility functions to help out and
1803 # Now that we have all these utility functions to help out and
1802 # logically divide up the task, generate the group.
1804 # logically divide up the task, generate the group.
1803 def gengroup():
1805 def gengroup():
1804 # The set of changed files starts empty.
1806 # The set of changed files starts empty.
1805 changedfiles = {}
1807 changedfiles = {}
1806 # Create a changenode group generator that will call our functions
1808 # Create a changenode group generator that will call our functions
1807 # back to lookup the owning changenode and collect information.
1809 # back to lookup the owning changenode and collect information.
1808 group = cl.group(msng_cl_lst, identity,
1810 group = cl.group(msng_cl_lst, identity,
1809 manifest_and_file_collector(changedfiles))
1811 manifest_and_file_collector(changedfiles))
1810 for chnk in group:
1812 for chnk in group:
1811 yield chnk
1813 yield chnk
1812
1814
1813 # The list of manifests has been collected by the generator
1815 # The list of manifests has been collected by the generator
1814 # calling our functions back.
1816 # calling our functions back.
1815 prune_manifests()
1817 prune_manifests()
1816 add_extra_nodes(1, msng_mnfst_set)
1818 add_extra_nodes(1, msng_mnfst_set)
1817 msng_mnfst_lst = msng_mnfst_set.keys()
1819 msng_mnfst_lst = msng_mnfst_set.keys()
1818 # Sort the manifestnodes by revision number.
1820 # Sort the manifestnodes by revision number.
1819 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1821 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1820 # Create a generator for the manifestnodes that calls our lookup
1822 # Create a generator for the manifestnodes that calls our lookup
1821 # and data collection functions back.
1823 # and data collection functions back.
1822 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1824 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1823 filenode_collector(changedfiles))
1825 filenode_collector(changedfiles))
1824 for chnk in group:
1826 for chnk in group:
1825 yield chnk
1827 yield chnk
1826
1828
1827 # These are no longer needed, dereference and toss the memory for
1829 # These are no longer needed, dereference and toss the memory for
1828 # them.
1830 # them.
1829 msng_mnfst_lst = None
1831 msng_mnfst_lst = None
1830 msng_mnfst_set.clear()
1832 msng_mnfst_set.clear()
1831
1833
1832 if extranodes:
1834 if extranodes:
1833 for fname in extranodes:
1835 for fname in extranodes:
1834 if isinstance(fname, int):
1836 if isinstance(fname, int):
1835 continue
1837 continue
1836 msng_filenode_set.setdefault(fname, {})
1838 msng_filenode_set.setdefault(fname, {})
1837 changedfiles[fname] = 1
1839 changedfiles[fname] = 1
1838 # Go through all our files in order sorted by name.
1840 # Go through all our files in order sorted by name.
1839 for fname in sorted(changedfiles):
1841 for fname in sorted(changedfiles):
1840 filerevlog = self.file(fname)
1842 filerevlog = self.file(fname)
1841 if not len(filerevlog):
1843 if not len(filerevlog):
1842 raise util.Abort(_("empty or missing revlog for %s") % fname)
1844 raise util.Abort(_("empty or missing revlog for %s") % fname)
1843 # Toss out the filenodes that the recipient isn't really
1845 # Toss out the filenodes that the recipient isn't really
1844 # missing.
1846 # missing.
1845 if fname in msng_filenode_set:
1847 if fname in msng_filenode_set:
1846 prune_filenodes(fname, filerevlog)
1848 prune_filenodes(fname, filerevlog)
1847 add_extra_nodes(fname, msng_filenode_set[fname])
1849 add_extra_nodes(fname, msng_filenode_set[fname])
1848 msng_filenode_lst = msng_filenode_set[fname].keys()
1850 msng_filenode_lst = msng_filenode_set[fname].keys()
1849 else:
1851 else:
1850 msng_filenode_lst = []
1852 msng_filenode_lst = []
1851 # If any filenodes are left, generate the group for them,
1853 # If any filenodes are left, generate the group for them,
1852 # otherwise don't bother.
1854 # otherwise don't bother.
1853 if len(msng_filenode_lst) > 0:
1855 if len(msng_filenode_lst) > 0:
1854 yield changegroup.chunkheader(len(fname))
1856 yield changegroup.chunkheader(len(fname))
1855 yield fname
1857 yield fname
1856 # Sort the filenodes by their revision #
1858 # Sort the filenodes by their revision #
1857 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1859 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1858 # Create a group generator and only pass in a changenode
1860 # Create a group generator and only pass in a changenode
1859 # lookup function as we need to collect no information
1861 # lookup function as we need to collect no information
1860 # from filenodes.
1862 # from filenodes.
1861 group = filerevlog.group(msng_filenode_lst,
1863 group = filerevlog.group(msng_filenode_lst,
1862 lookup_filenode_link_func(fname))
1864 lookup_filenode_link_func(fname))
1863 for chnk in group:
1865 for chnk in group:
1864 yield chnk
1866 yield chnk
1865 if fname in msng_filenode_set:
1867 if fname in msng_filenode_set:
1866 # Don't need this anymore, toss it to free memory.
1868 # Don't need this anymore, toss it to free memory.
1867 del msng_filenode_set[fname]
1869 del msng_filenode_set[fname]
1868 # Signal that no more groups are left.
1870 # Signal that no more groups are left.
1869 yield changegroup.closechunk()
1871 yield changegroup.closechunk()
1870
1872
1871 if msng_cl_lst:
1873 if msng_cl_lst:
1872 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1874 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1873
1875
1874 return util.chunkbuffer(gengroup())
1876 return util.chunkbuffer(gengroup())
1875
1877
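    # A minimal sketch, not part of the original file: how a caller might
    # drain the chunkbuffer returned by changegroupsubset above into a
    # file-like object. The helper name, the `fh` argument and the 4096-byte
    # read size are illustrative assumptions, not existing API.
    def _example_write_changegroup(self, fh, bases, heads, source):
        cg = self.changegroupsubset(bases, heads, source)
        while 1:
            chunk = cg.read(4096)
            if not chunk:
                break
            fh.write(chunk)
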
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

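    # For reference, a sketch (reconstructed from the yields above, not quoted
    # from other documentation) of the stream the gengroup generators emit:
    #
    #   <changelog group chunks>
    #   <manifest group chunks>
    #   for each changed file, sorted by name:
    #       chunkheader(len(fname)) + fname
    #       <that file's revlog group chunks>
    #   changegroup.closechunk()   # zero-length chunk ends the stream
    #
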
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

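    # A minimal sketch, not part of the original file: mapping the return
    # value documented in addchangegroup above back to what happened. The
    # helper name is a hypothetical addition for illustration only.
    def _describe_addchangegroup_result(self, ret):
        if ret == 0:
            return 'nothing changed or no source'
        elif ret == 1:
            return 'number of heads unchanged'
        elif ret > 1:
            return '%d head(s) added' % (ret - 1)
        else:
            return '%d head(s) removed' % (-ret - 1)
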
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

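    # A minimal sketch, not part of the original file, of the wire format that
    # stream_in above parses, written as a hypothetical generator for the
    # serving side. `entries` is assumed to be a list of (name, size, data)
    # tuples; the method name is illustrative only.
    def _example_stream_out(self, entries):
        yield '0\n'  # 0 = OK; 1 = operation forbidden; 2 = could not lock
        total_bytes = sum(size for name, size, data in entries)
        yield '%d %d\n' % (len(entries), total_bytes)
        for name, size, data in entries:
            yield '%s\0%d\n' % (name, size)
            yield data
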
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

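    # Purely illustrative, not part of the original file: how a caller picks
    # between the two clone paths above. `dest` and `other` are assumed names
    # for the local and the remote repository.
    #
    #   dest.clone(other, stream=True)        # streams if the server allows it
    #   dest.clone(other, heads=[somehead])   # explicit heads force a pull
    #
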
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

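# A minimal sketch, not part of the original file: the closure returned by
# aftertrans performs the renames only when it is finally called, which is
# what lets it be handed to a transaction without a circular reference.
# The file names below are illustrative.
#
#   cb = aftertrans([('journal.dirstate', 'undo.dirstate')])
#   # ... transaction finishes ...
#   cb()   # journal.dirstate is now renamed to undo.dirstate
#
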
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
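
# A minimal sketch, not part of the original file: how an opener might use the
# two module-level hooks above; the dispatch itself is an assumption, not a
# quote of hg.repository().
#
#   if islocal(path):
#       repo = instance(ui, path, create=False)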