Merge with mpm
Brendan Cully
r4537:2f489b00 merge default
@@ -0,0 +1,19 b''
1 #!/bin/sh
2 # a test for issue586
3
4 hg init a
5 cd a
6 echo a > a
7 hg ci -Ama
8
9 hg init ../b
10 cd ../b
11 echo b > b
12 hg ci -Amb
13
14 hg pull -f ../a
15 hg merge
16 hg rm -f a
17 hg ci -Amc
18
19 hg st -A
@@ -0,0 +1,13 b''
1 adding a
2 adding b
3 pulling from ../a
4 searching for changes
5 warning: repository is unrelated
6 adding changesets
7 adding manifests
8 adding file changes
9 added 1 changesets with 1 changes to 1 files (+1 heads)
10 (run 'hg heads' to see heads, 'hg merge' to merge)
11 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
12 (branch merge, don't forget to commit)
13 C b
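
The fix itself is the two-line addition to commit() in the localrepo.py diff near the end of this changeset: a file removed with -f during a merge may exist only in the second parent's manifest (m2), and previously it never made it onto the removed list. A minimal sketch of the removal loop as it reads after the change:

    for f in remove:
        if f in m1:
            del m1[f]
            removed.append(f)
        elif f in m2:            # only in the other merge parent (issue586)
            removed.append(f)    # still record the removal in the changelog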
@@ -1,243 +1,239 b''
1 1 # hgweb/server.py - The standalone hg web server.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
10 10 from mercurial import ui, hg, util, templater
11 11 from hgweb_mod import hgweb
12 12 from hgwebdir_mod import hgwebdir
13 13 from request import wsgiapplication
14 14 from mercurial.i18n import gettext as _
15 15
16 16 def _splitURI(uri):
17 17 """ Return path and query split from the URI
18 18
19 19 Just like in a CGI environment, the path is unquoted, the query
20 20 is not.
21 21 """
22 22 if '?' in uri:
23 23 path, query = uri.split('?', 1)
24 24 else:
25 25 path, query = uri, ''
26 26 return urllib.unquote(path), query
27 27
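
A quick illustration of the contract (values hypothetical): the path comes back unquoted, the query string verbatim.

    # _splitURI('/repo/raw%2Dfile/tip/a?style=raw')
    #     -> ('/repo/raw-file/tip/a', 'style=raw')
    # _splitURI('/repo')
    #     -> ('/repo', '')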
28 28 class _error_logger(object):
29 29 def __init__(self, handler):
30 30 self.handler = handler
31 31 def flush(self):
32 32 pass
33 33 def write(self, str):
34 34 self.writelines(str.split('\n'))
35 35 def writelines(self, seq):
36 36 for msg in seq:
37 37 self.handler.log_error("HG error: %s", msg)
38 38
39 39 class _hgwebhandler(object, BaseHTTPServer.BaseHTTPRequestHandler):
40 40 def __init__(self, *args, **kargs):
41 41 self.protocol_version = 'HTTP/1.1'
42 42 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
43 43
44 44 def log_error(self, format, *args):
45 45 errorlog = self.server.errorlog
46 46 errorlog.write("%s - - [%s] %s\n" % (self.client_address[0],
47 47 self.log_date_time_string(),
48 48 format % args))
49 49
50 50 def log_message(self, format, *args):
51 51 accesslog = self.server.accesslog
52 52 accesslog.write("%s - - [%s] %s\n" % (self.client_address[0],
53 53 self.log_date_time_string(),
54 54 format % args))
55 55
56 56 def do_POST(self):
57 57 try:
58 58 try:
59 59 self.do_hgweb()
60 60 except socket.error, inst:
61 61 if inst[0] != errno.EPIPE:
62 62 raise
63 63 except StandardError, inst:
64 64 self._start_response("500 Internal Server Error", [])
65 65 self._write("Internal Server Error")
66 66 tb = "".join(traceback.format_exception(*sys.exc_info()))
67 67 self.log_error("Exception happened during processing request '%s':\n%s",
68 68 self.path, tb)
69 69
70 70 def do_GET(self):
71 71 self.do_POST()
72 72
73 73 def do_hgweb(self):
74 74 path_info, query = _splitURI(self.path)
75 75
76 76 env = {}
77 77 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
78 78 env['REQUEST_METHOD'] = self.command
79 79 env['SERVER_NAME'] = self.server.server_name
80 80 env['SERVER_PORT'] = str(self.server.server_port)
81 81 env['REQUEST_URI'] = self.path
82 82 env['PATH_INFO'] = path_info
83 83 env['REMOTE_HOST'] = self.client_address[0]
84 84 env['REMOTE_ADDR'] = self.client_address[0]
85 85 if query:
86 86 env['QUERY_STRING'] = query
87 87
88 88 if self.headers.typeheader is None:
89 89 env['CONTENT_TYPE'] = self.headers.type
90 90 else:
91 91 env['CONTENT_TYPE'] = self.headers.typeheader
92 92 length = self.headers.getheader('content-length')
93 93 if length:
94 94 env['CONTENT_LENGTH'] = length
95 95 for header in [h for h in self.headers.keys() \
96 96 if h not in ('content-type', 'content-length')]:
97 97 hkey = 'HTTP_' + header.replace('-', '_').upper()
98 98 hval = self.headers.getheader(header)
99 99 hval = hval.replace('\n', '').strip()
100 100 if hval:
101 101 env[hkey] = hval
102 102 env['SERVER_PROTOCOL'] = self.request_version
103 103 env['wsgi.version'] = (1, 0)
104 104 env['wsgi.url_scheme'] = 'http'
105 105 env['wsgi.input'] = self.rfile
106 106 env['wsgi.errors'] = _error_logger(self)
107 107 env['wsgi.multithread'] = isinstance(self.server,
108 108 SocketServer.ThreadingMixIn)
109 109 env['wsgi.multiprocess'] = isinstance(self.server,
110 110 SocketServer.ForkingMixIn)
111 111 env['wsgi.run_once'] = 0
112 112
113 113 self.close_connection = True
114 114 self.saved_status = None
115 115 self.saved_headers = []
116 116 self.sent_headers = False
117 117 self.length = None
118 118 req = self.server.reqmaker(env, self._start_response)
119 119 for data in req:
120 120 if data:
121 121 self._write(data)
122 122
123 123 def send_headers(self):
124 124 if not self.saved_status:
125 125 raise AssertionError("Sending headers before start_response() called")
126 126 saved_status = self.saved_status.split(None, 1)
127 127 saved_status[0] = int(saved_status[0])
128 128 self.send_response(*saved_status)
129 129 should_close = True
130 130 for h in self.saved_headers:
131 131 self.send_header(*h)
132 132 if h[0].lower() == 'content-length':
133 133 should_close = False
134 134 self.length = int(h[1])
135 135 # The value of the Connection header is a list of case-insensitive
136 136 # tokens separated by commas and optional whitespace.
137 137 if 'close' in [token.strip().lower() for token in
138 138 self.headers.get('connection', '').split(',')]:
139 139 should_close = True
140 140 if should_close:
141 141 self.send_header('Connection', 'close')
142 142 self.close_connection = should_close
143 143 self.end_headers()
144 144 self.sent_headers = True
145 145
146 146 def _start_response(self, http_status, headers, exc_info=None):
147 147 code, msg = http_status.split(None, 1)
148 148 code = int(code)
149 149 self.saved_status = http_status
150 150 bad_headers = ('connection', 'transfer-encoding')
151 151 self.saved_headers = [ h for h in headers \
152 152 if h[0].lower() not in bad_headers ]
153 153 return self._write
154 154
155 155 def _write(self, data):
156 156 if not self.saved_status:
157 157 raise AssertionError("data written before start_response() called")
158 158 elif not self.sent_headers:
159 159 self.send_headers()
160 160 if self.length is not None:
161 161 if len(data) > self.length:
162 162 raise AssertionError("Content-length header sent, but more bytes than specified are being written.")
163 163 self.length = self.length - len(data)
164 164 self.wfile.write(data)
165 165 self.wfile.flush()
166 166
167 167 def create_server(ui, repo):
168 168 use_threads = True
169 169
170 170 def openlog(opt, default):
171 171 if opt and opt != '-':
172 172 return open(opt, 'w')
173 173 return default
174 174
175 175 address = ui.config("web", "address", "")
176 176 port = int(ui.config("web", "port", 8000))
177 177 use_ipv6 = ui.configbool("web", "ipv6")
178 178 webdir_conf = ui.config("web", "webdir_conf")
179 179 accesslog = openlog(ui.config("web", "accesslog", "-"), sys.stdout)
180 180 errorlog = openlog(ui.config("web", "errorlog", "-"), sys.stderr)
181 181
182 182 if use_threads:
183 183 try:
184 184 from threading import activeCount
185 185 except ImportError:
186 186 use_threads = False
187 187
188 188 if use_threads:
189 189 _mixin = SocketServer.ThreadingMixIn
190 190 else:
191 191 if hasattr(os, "fork"):
192 192 _mixin = SocketServer.ForkingMixIn
193 193 else:
194 194 class _mixin:
195 195 pass
196 196
197 197 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
198 198
199 199 # SO_REUSEADDR has broken semantics on windows
200 200 if os.name == 'nt':
201 201 allow_reuse_address = 0
202 202
203 203 def __init__(self, *args, **kargs):
204 204 BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
205 205 self.accesslog = accesslog
206 206 self.errorlog = errorlog
207 207 self.daemon_threads = True
208 208 def make_handler():
209 209 if webdir_conf:
210 210 hgwebobj = hgwebdir(webdir_conf, ui)
211 211 elif repo is not None:
212 212 hgwebobj = hgweb(hg.repository(repo.ui, repo.root))
213 213 else:
214 214 raise hg.RepoError(_("There is no Mercurial repository here"
215 215 " (.hg not found)"))
216 216 return hgwebobj
217 217 self.reqmaker = wsgiapplication(make_handler)
218 218
219 addr, port = self.socket.getsockname()[:2]
220 if addr in ('0.0.0.0', '::'):
219 addr = address
220 if addr in ('', '::'):
221 221 addr = socket.gethostname()
222 else:
223 try:
224 addr = socket.gethostbyaddr(addr)[0]
225 except socket.error:
226 pass
222
227 223 self.addr, self.port = addr, port
228 224
229 225 class IPv6HTTPServer(MercurialHTTPServer):
230 226 address_family = getattr(socket, 'AF_INET6', None)
231 227
232 228 def __init__(self, *args, **kwargs):
233 229 if self.address_family is None:
234 230 raise hg.RepoError(_('IPv6 not available on this system'))
235 231 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
236 232
237 233 try:
238 234 if use_ipv6:
239 235 return IPv6HTTPServer((address, port), _hgwebhandler)
240 236 else:
241 237 return MercurialHTTPServer((address, port), _hgwebhandler)
242 238 except socket.error, inst:
243 239 raise util.Abort(_('cannot start server: %s') % inst.args[1])
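
For context, _start_response() and _write() above are the server half of the WSGI calling convention; a minimal application they could drive looks like this (a sketch, not part of hgweb; demo_app is a hypothetical name):

    def demo_app(environ, start_response):
        # environ carries the CGI-style keys assembled in do_hgweb()
        body = "hello from %s\n" % environ.get('PATH_INFO', '/')
        start_response("200 OK", [('Content-Type', 'text/plain'),
                                  ('Content-Length', str(len(body)))])
        return [body]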
@@ -1,1965 +1,1967 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 if not path:
24 24 p = os.getcwd()
25 25 while not os.path.isdir(os.path.join(p, ".hg")):
26 26 oldp = p
27 27 p = os.path.dirname(p)
28 28 if p == oldp:
29 29 raise repo.RepoError(_("There is no Mercurial repository"
30 30 " here (.hg not found)"))
31 31 path = p
32 32
33 33 self.root = os.path.realpath(path)
34 34 self.path = os.path.join(self.root, ".hg")
35 35 self.origroot = path
36 36 self.opener = util.opener(self.path)
37 37 self.wopener = util.opener(self.root)
38 38
39 39 if not os.path.isdir(self.path):
40 40 if create:
41 41 if not os.path.exists(path):
42 42 os.mkdir(path)
43 43 os.mkdir(self.path)
44 44 requirements = ["revlogv1"]
45 45 if parentui.configbool('format', 'usestore', True):
46 46 os.mkdir(os.path.join(self.path, "store"))
47 47 requirements.append("store")
48 48 # create an invalid changelog
49 49 self.opener("00changelog.i", "a").write(
50 50 '\0\0\0\2' # represents revlogv2
51 51 ' dummy changelog to prevent using the old repo layout'
52 52 )
53 53 reqfile = self.opener("requires", "w")
54 54 for r in requirements:
55 55 reqfile.write("%s\n" % r)
56 56 reqfile.close()
57 57 else:
58 58 raise repo.RepoError(_("repository %s not found") % path)
59 59 elif create:
60 60 raise repo.RepoError(_("repository %s already exists") % path)
61 61 else:
62 62 # find requirements
63 63 try:
64 64 requirements = self.opener("requires").read().splitlines()
65 65 except IOError, inst:
66 66 if inst.errno != errno.ENOENT:
67 67 raise
68 68 requirements = []
69 69 # check them
70 70 for r in requirements:
71 71 if r not in self.supported:
72 72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73 73
74 74 # setup store
75 75 if "store" in requirements:
76 76 self.encodefn = util.encodefilename
77 77 self.decodefn = util.decodefilename
78 78 self.spath = os.path.join(self.path, "store")
79 79 else:
80 80 self.encodefn = lambda x: x
81 81 self.decodefn = lambda x: x
82 82 self.spath = self.path
83 83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84 84
85 85 self.ui = ui.ui(parentui=parentui)
86 86 try:
87 87 self.ui.readconfig(self.join("hgrc"), self.root)
88 88 except IOError:
89 89 pass
90 90
91 91 self.changelog = changelog.changelog(self.sopener)
92 92 self.sopener.defversion = self.changelog.version
93 93 self.manifest = manifest.manifest(self.sopener)
94 94
95 95 fallback = self.ui.config('ui', 'fallbackencoding')
96 96 if fallback:
97 97 util._fallbackencoding = fallback
98 98
99 99 self.tagscache = None
100 100 self.branchcache = None
101 101 self.nodetagscache = None
102 102 self.filterpats = {}
103 103 self.transhandle = None
104 104
105 105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
106 106
107 107 def url(self):
108 108 return 'file:' + self.root
109 109
110 110 def hook(self, name, throw=False, **args):
111 111 def callhook(hname, funcname):
112 112 '''call python hook. hook is callable object, looked up as
113 113 name in python module. if callable returns "true", hook
114 114 fails, else passes. if hook raises exception, treated as
115 115 hook failure. exception propagates if throw is "true".
116 116
117 117 reason for "true" meaning "hook failed" is so that
118 118 unmodified commands (e.g. mercurial.commands.update) can
119 119 be run as hooks without wrappers to convert return values.'''
120 120
121 121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
122 122 obj = funcname
123 123 if not callable(obj):
124 124 d = funcname.rfind('.')
125 125 if d == -1:
126 126 raise util.Abort(_('%s hook is invalid ("%s" not in '
127 127 'a module)') % (hname, funcname))
128 128 modname = funcname[:d]
129 129 try:
130 130 obj = __import__(modname)
131 131 except ImportError:
132 132 try:
133 133 # extensions are loaded with hgext_ prefix
134 134 obj = __import__("hgext_%s" % modname)
135 135 except ImportError:
136 136 raise util.Abort(_('%s hook is invalid '
137 137 '(import of "%s" failed)') %
138 138 (hname, modname))
139 139 try:
140 140 for p in funcname.split('.')[1:]:
141 141 obj = getattr(obj, p)
142 142 except AttributeError, err:
143 143 raise util.Abort(_('%s hook is invalid '
144 144 '("%s" is not defined)') %
145 145 (hname, funcname))
146 146 if not callable(obj):
147 147 raise util.Abort(_('%s hook is invalid '
148 148 '("%s" is not callable)') %
149 149 (hname, funcname))
150 150 try:
151 151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
152 152 except (KeyboardInterrupt, util.SignalInterrupt):
153 153 raise
154 154 except Exception, exc:
155 155 if isinstance(exc, util.Abort):
156 156 self.ui.warn(_('error: %s hook failed: %s\n') %
157 157 (hname, exc.args[0]))
158 158 else:
159 159 self.ui.warn(_('error: %s hook raised an exception: '
160 160 '%s\n') % (hname, exc))
161 161 if throw:
162 162 raise
163 163 self.ui.print_exc()
164 164 return True
165 165 if r:
166 166 if throw:
167 167 raise util.Abort(_('%s hook failed') % hname)
168 168 self.ui.warn(_('warning: %s hook failed\n') % hname)
169 169 return r
170 170
171 171 def runhook(name, cmd):
172 172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
173 173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
174 174 r = util.system(cmd, environ=env, cwd=self.root)
175 175 if r:
176 176 desc, r = util.explain_exit(r)
177 177 if throw:
178 178 raise util.Abort(_('%s hook %s') % (name, desc))
179 179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
180 180 return r
181 181
182 182 r = False
183 183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
184 184 if hname.split(".", 1)[0] == name and cmd]
185 185 hooks.sort()
186 186 for hname, cmd in hooks:
187 187 if callable(cmd):
188 188 r = callhook(hname, cmd) or r
189 189 elif cmd.startswith('python:'):
190 190 r = callhook(hname, cmd[7:].strip()) or r
191 191 else:
192 192 r = runhook(hname, cmd) or r
193 193 return r
194 194
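
As the callhook() docstring above explains, an in-process hook signals failure by returning a true value. A minimal hook following that convention (precommit_check is a hypothetical name):

    def precommit_check(ui, repo, hooktype, **kwargs):
        ui.note("checking %s in %s\n" % (hooktype, repo.root))
        return False    # a false return means the hook passed

It would be wired up through the hooks section with the python: prefix handled above, e.g. pretxncommit.check = python:mymodule.precommit_check.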
195 195 tag_disallowed = ':\r\n'
196 196
197 197 def _tag(self, name, node, message, local, user, date, parent=None):
198 198 use_dirstate = parent is None
199 199
200 200 for c in self.tag_disallowed:
201 201 if c in name:
202 202 raise util.Abort(_('%r cannot be used in a tag name') % c)
203 203
204 204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
205 205
206 206 if local:
207 207 # local tags are stored in the current charset
208 208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
209 209 self.hook('tag', node=hex(node), tag=name, local=local)
210 210 return
211 211
212 212 # committed tags are stored in UTF-8
213 213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
214 214 if use_dirstate:
215 215 self.wfile('.hgtags', 'ab').write(line)
216 216 else:
217 217 ntags = self.filectx('.hgtags', parent).data()
218 218 self.wfile('.hgtags', 'ab').write(ntags + line)
219 219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
220 220 self.add(['.hgtags'])
221 221
222 222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
223 223
224 224 self.hook('tag', node=hex(node), tag=name, local=local)
225 225
226 226 return tagnode
227 227
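
For reference, the committed-tag path above appends exactly one line per tag to .hgtags: the full hex node followed by the UTF-8 tag name (the hash below is made up):

    # 1d22ba8dcb39ab2f1b8bcb53b6bbf4f8d4b3e2c1 release-1.0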
228 228 def tag(self, name, node, message, local, user, date):
229 229 '''tag a revision with a symbolic name.
230 230
231 231 if local is True, the tag is stored in a per-repository file.
232 232 otherwise, it is stored in the .hgtags file, and a new
233 233 changeset is committed with the change.
234 234
235 235 keyword arguments:
236 236
237 237 local: whether to store tag in non-version-controlled file
238 238 (default False)
239 239
240 240 message: commit message to use if committing
241 241
242 242 user: name of user to use if committing
243 243
244 244 date: date tuple to use if committing'''
245 245
246 246 for x in self.status()[:5]:
247 247 if '.hgtags' in x:
248 248 raise util.Abort(_('working copy of .hgtags is changed '
249 249 '(please commit .hgtags manually)'))
250 250
251 251
252 252 self._tag(name, node, message, local, user, date)
253 253
254 254 def tags(self):
255 255 '''return a mapping of tag to node'''
256 256 if self.tagscache:
257 257 return self.tagscache
258 258
259 259 globaltags = {}
260 260
261 261 def readtags(lines, fn):
262 262 filetags = {}
263 263 count = 0
264 264
265 265 def warn(msg):
266 266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
267 267
268 268 for l in lines:
269 269 count += 1
270 270 if not l:
271 271 continue
272 272 s = l.split(" ", 1)
273 273 if len(s) != 2:
274 274 warn(_("cannot parse entry"))
275 275 continue
276 276 node, key = s
277 277 key = util.tolocal(key.strip()) # stored in UTF-8
278 278 try:
279 279 bin_n = bin(node)
280 280 except TypeError:
281 281 warn(_("node '%s' is not well formed") % node)
282 282 continue
283 283 if bin_n not in self.changelog.nodemap:
284 284 warn(_("tag '%s' refers to unknown node") % key)
285 285 continue
286 286
287 287 h = []
288 288 if key in filetags:
289 289 n, h = filetags[key]
290 290 h.append(n)
291 291 filetags[key] = (bin_n, h)
292 292
293 293 for k,nh in filetags.items():
294 294 if k not in globaltags:
295 295 globaltags[k] = nh
296 296 continue
297 297 # we prefer the global tag if:
298 298 # it supersedes us OR
299 299 # mutual supersedes and it has a higher rank
300 300 # otherwise we win because we're tip-most
301 301 an, ah = nh
302 302 bn, bh = globaltags[k]
303 303 if bn != an and an in bh and \
304 304 (bn not in ah or len(bh) > len(ah)):
305 305 an = bn
306 306 ah.extend([n for n in bh if n not in ah])
307 307 globaltags[k] = an, ah
308 308
309 309 # read the tags file from each head, ending with the tip
310 310 f = None
311 311 for rev, node, fnode in self._hgtagsnodes():
312 312 f = (f and f.filectx(fnode) or
313 313 self.filectx('.hgtags', fileid=fnode))
314 314 readtags(f.data().splitlines(), f)
315 315
316 316 try:
317 317 data = util.fromlocal(self.opener("localtags").read())
318 318 # localtags are stored in the local character set
319 319 # while the internal tag table is stored in UTF-8
320 320 readtags(data.splitlines(), "localtags")
321 321 except IOError:
322 322 pass
323 323
324 324 self.tagscache = {}
325 325 for k,nh in globaltags.items():
326 326 n = nh[0]
327 327 if n != nullid:
328 328 self.tagscache[k] = n
329 329 self.tagscache['tip'] = self.changelog.tip()
330 330
331 331 return self.tagscache
332 332
333 333 def _hgtagsnodes(self):
334 334 heads = self.heads()
335 335 heads.reverse()
336 336 last = {}
337 337 ret = []
338 338 for node in heads:
339 339 c = self.changectx(node)
340 340 rev = c.rev()
341 341 try:
342 342 fnode = c.filenode('.hgtags')
343 343 except revlog.LookupError:
344 344 continue
345 345 ret.append((rev, node, fnode))
346 346 if fnode in last:
347 347 ret[last[fnode]] = None
348 348 last[fnode] = len(ret) - 1
349 349 return [item for item in ret if item]
350 350
351 351 def tagslist(self):
352 352 '''return a list of tags ordered by revision'''
353 353 l = []
354 354 for t, n in self.tags().items():
355 355 try:
356 356 r = self.changelog.rev(n)
357 357 except:
358 358 r = -2 # sort to the beginning of the list if unknown
359 359 l.append((r, t, n))
360 360 l.sort()
361 361 return [(t, n) for r, t, n in l]
362 362
363 363 def nodetags(self, node):
364 364 '''return the tags associated with a node'''
365 365 if not self.nodetagscache:
366 366 self.nodetagscache = {}
367 367 for t, n in self.tags().items():
368 368 self.nodetagscache.setdefault(n, []).append(t)
369 369 return self.nodetagscache.get(node, [])
370 370
371 371 def _branchtags(self):
372 372 partial, last, lrev = self._readbranchcache()
373 373
374 374 tiprev = self.changelog.count() - 1
375 375 if lrev != tiprev:
376 376 self._updatebranchcache(partial, lrev+1, tiprev+1)
377 377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
378 378
379 379 return partial
380 380
381 381 def branchtags(self):
382 382 if self.branchcache is not None:
383 383 return self.branchcache
384 384
385 385 self.branchcache = {} # avoid recursion in changectx
386 386 partial = self._branchtags()
387 387
388 388 # the branch cache is stored on disk as UTF-8, but in the local
389 389 # charset internally
390 390 for k, v in partial.items():
391 391 self.branchcache[util.tolocal(k)] = v
392 392 return self.branchcache
393 393
394 394 def _readbranchcache(self):
395 395 partial = {}
396 396 try:
397 397 f = self.opener("branch.cache")
398 398 lines = f.read().split('\n')
399 399 f.close()
400 400 except (IOError, OSError):
401 401 return {}, nullid, nullrev
402 402
403 403 try:
404 404 last, lrev = lines.pop(0).split(" ", 1)
405 405 last, lrev = bin(last), int(lrev)
406 406 if not (lrev < self.changelog.count() and
407 407 self.changelog.node(lrev) == last): # sanity check
408 408 # invalidate the cache
409 409 raise ValueError('Invalid branch cache: unknown tip')
410 410 for l in lines:
411 411 if not l: continue
412 412 node, label = l.split(" ", 1)
413 413 partial[label.strip()] = bin(node)
414 414 except (KeyboardInterrupt, util.SignalInterrupt):
415 415 raise
416 416 except Exception, inst:
417 417 if self.ui.debugflag:
418 418 self.ui.warn(str(inst), '\n')
419 419 partial, last, lrev = {}, nullid, nullrev
420 420 return partial, last, lrev
421 421
422 422 def _writebranchcache(self, branches, tip, tiprev):
423 423 try:
424 424 f = self.opener("branch.cache", "w", atomictemp=True)
425 425 f.write("%s %s\n" % (hex(tip), tiprev))
426 426 for label, node in branches.iteritems():
427 427 f.write("%s %s\n" % (hex(node), label))
428 428 f.rename()
429 429 except (IOError, OSError):
430 430 pass
431 431
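
The branch.cache file these two methods exchange is line oriented: the first line holds the cached tip as '<hex node> <rev>' (the sanity check in _readbranchcache), then one '<hex node> <label>' line per branch. A sketch with abbreviated, made-up hashes:

    9c2e3fa01b7d... 1204
    4f1d27bbe9a0... default
    77aa01cd3f52... stable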
432 432 def _updatebranchcache(self, partial, start, end):
433 433 for r in xrange(start, end):
434 434 c = self.changectx(r)
435 435 b = c.branch()
436 436 partial[b] = c.node()
437 437
438 438 def lookup(self, key):
439 439 if key == '.':
440 440 key, second = self.dirstate.parents()
441 441 if key == nullid:
442 442 raise repo.RepoError(_("no revision checked out"))
443 443 if second != nullid:
444 444 self.ui.warn(_("warning: working directory has two parents, "
445 445 "tag '.' uses the first\n"))
446 446 elif key == 'null':
447 447 return nullid
448 448 n = self.changelog._match(key)
449 449 if n:
450 450 return n
451 451 if key in self.tags():
452 452 return self.tags()[key]
453 453 if key in self.branchtags():
454 454 return self.branchtags()[key]
455 455 n = self.changelog._partialmatch(key)
456 456 if n:
457 457 return n
458 458 raise repo.RepoError(_("unknown revision '%s'") % key)
459 459
460 460 def dev(self):
461 461 return os.lstat(self.path).st_dev
462 462
463 463 def local(self):
464 464 return True
465 465
466 466 def join(self, f):
467 467 return os.path.join(self.path, f)
468 468
469 469 def sjoin(self, f):
470 470 f = self.encodefn(f)
471 471 return os.path.join(self.spath, f)
472 472
473 473 def wjoin(self, f):
474 474 return os.path.join(self.root, f)
475 475
476 476 def file(self, f):
477 477 if f[0] == '/':
478 478 f = f[1:]
479 479 return filelog.filelog(self.sopener, f)
480 480
481 481 def changectx(self, changeid=None):
482 482 return context.changectx(self, changeid)
483 483
484 484 def workingctx(self):
485 485 return context.workingctx(self)
486 486
487 487 def parents(self, changeid=None):
488 488 '''
489 489 get list of changectxs for parents of changeid or working directory
490 490 '''
491 491 if changeid is None:
492 492 pl = self.dirstate.parents()
493 493 else:
494 494 n = self.changelog.lookup(changeid)
495 495 pl = self.changelog.parents(n)
496 496 if pl[1] == nullid:
497 497 return [self.changectx(pl[0])]
498 498 return [self.changectx(pl[0]), self.changectx(pl[1])]
499 499
500 500 def filectx(self, path, changeid=None, fileid=None):
501 501 """changeid can be a changeset revision, node, or tag.
502 502 fileid can be a file revision or node."""
503 503 return context.filectx(self, path, changeid, fileid)
504 504
505 505 def getcwd(self):
506 506 return self.dirstate.getcwd()
507 507
508 508 def pathto(self, f, cwd=None):
509 509 return self.dirstate.pathto(f, cwd)
510 510
511 511 def wfile(self, f, mode='r'):
512 512 return self.wopener(f, mode)
513 513
514 514 def _link(self, f):
515 515 return os.path.islink(self.wjoin(f))
516 516
517 517 def _filter(self, filter, filename, data):
518 518 if filter not in self.filterpats:
519 519 l = []
520 520 for pat, cmd in self.ui.configitems(filter):
521 521 mf = util.matcher(self.root, "", [pat], [], [])[1]
522 522 l.append((mf, cmd))
523 523 self.filterpats[filter] = l
524 524
525 525 for mf, cmd in self.filterpats[filter]:
526 526 if mf(filename):
527 527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
528 528 data = util.filter(data, cmd)
529 529 break
530 530
531 531 return data
532 532
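
The filter tables are built lazily from the hgrc section of the same name; a sketch of the flow with a hypothetical [encode] rule:

    # hgrc:    [encode]
    #          **.gz = gunzip
    #
    # first call:  self.filterpats['encode'] = [(matchfn, 'gunzip')]
    # afterwards:  _filter('encode', 'docs/a.gz', data) pipes data through
    #              'gunzip' via util.filter(); non-matching files pass
    #              through untouched.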
533 533 def wread(self, filename):
534 534 if self._link(filename):
535 535 data = os.readlink(self.wjoin(filename))
536 536 else:
537 537 data = self.wopener(filename, 'r').read()
538 538 return self._filter("encode", filename, data)
539 539
540 540 def wwrite(self, filename, data, flags):
541 541 data = self._filter("decode", filename, data)
542 542 if "l" in flags:
543 543 f = self.wjoin(filename)
544 544 try:
545 545 os.unlink(f)
546 546 except OSError:
547 547 pass
548 548 d = os.path.dirname(f)
549 549 if not os.path.exists(d):
550 550 os.makedirs(d)
551 551 os.symlink(data, f)
552 552 else:
553 553 try:
554 554 if self._link(filename):
555 555 os.unlink(self.wjoin(filename))
556 556 except OSError:
557 557 pass
558 558 self.wopener(filename, 'w').write(data)
559 559 util.set_exec(self.wjoin(filename), "x" in flags)
560 560
561 561 def wwritedata(self, filename, data):
562 562 return self._filter("decode", filename, data)
563 563
564 564 def transaction(self):
565 565 tr = self.transhandle
566 566 if tr != None and tr.running():
567 567 return tr.nest()
568 568
569 569 # save dirstate for rollback
570 570 try:
571 571 ds = self.opener("dirstate").read()
572 572 except IOError:
573 573 ds = ""
574 574 self.opener("journal.dirstate", "w").write(ds)
575 575
576 576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
577 577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
578 578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 579 self.sjoin("journal"),
580 580 aftertrans(renames))
581 581 self.transhandle = tr
582 582 return tr
583 583
584 584 def recover(self):
585 585 l = self.lock()
586 586 if os.path.exists(self.sjoin("journal")):
587 587 self.ui.status(_("rolling back interrupted transaction\n"))
588 588 transaction.rollback(self.sopener, self.sjoin("journal"))
589 589 self.reload()
590 590 return True
591 591 else:
592 592 self.ui.warn(_("no interrupted transaction available\n"))
593 593 return False
594 594
595 595 def rollback(self, wlock=None, lock=None):
596 596 if not wlock:
597 597 wlock = self.wlock()
598 598 if not lock:
599 599 lock = self.lock()
600 600 if os.path.exists(self.sjoin("undo")):
601 601 self.ui.status(_("rolling back last transaction\n"))
602 602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 604 self.reload()
605 605 self.wreload()
606 606 else:
607 607 self.ui.warn(_("no rollback information available\n"))
608 608
609 609 def wreload(self):
610 610 self.dirstate.reload()
611 611
612 612 def reload(self):
613 613 self.changelog.load()
614 614 self.manifest.load()
615 615 self.tagscache = None
616 616 self.nodetagscache = None
617 617
618 618 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
619 619 desc=None):
620 620 try:
621 621 l = lock.lock(lockname, 0, releasefn, desc=desc)
622 622 except lock.LockHeld, inst:
623 623 if not wait:
624 624 raise
625 625 self.ui.warn(_("waiting for lock on %s held by %r\n") %
626 626 (desc, inst.locker))
627 627 # default to 600 seconds timeout
628 628 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
629 629 releasefn, desc=desc)
630 630 if acquirefn:
631 631 acquirefn()
632 632 return l
633 633
634 634 def lock(self, wait=1):
635 635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
636 636 desc=_('repository %s') % self.origroot)
637 637
638 638 def wlock(self, wait=1):
639 639 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
640 640 self.wreload,
641 641 desc=_('working directory of %s') % self.origroot)
642 642
643 643 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
644 644 """
645 645 commit an individual file as part of a larger transaction
646 646 """
647 647
648 648 t = self.wread(fn)
649 649 fl = self.file(fn)
650 650 fp1 = manifest1.get(fn, nullid)
651 651 fp2 = manifest2.get(fn, nullid)
652 652
653 653 meta = {}
654 654 cp = self.dirstate.copied(fn)
655 655 if cp:
656 656 # Mark the new revision of this file as a copy of another
657 657 # file. This copy data will effectively act as a parent
658 658 # of this new revision. If this is a merge, the first
659 659 # parent will be the nullid (meaning "look up the copy data")
660 660 # and the second one will be the other parent. For example:
661 661 #
662 662 # 0 --- 1 --- 3 rev1 changes file foo
663 663 # \ / rev2 renames foo to bar and changes it
664 664 # \- 2 -/ rev3 should have bar with all changes and
665 665 # should record that bar descends from
666 666 # bar in rev2 and foo in rev1
667 667 #
668 668 # this allows this merge to succeed:
669 669 #
670 670 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
671 671 # \ / merging rev3 and rev4 should use bar@rev2
672 672 # \- 2 --- 4 as the merge base
673 673 #
674 674 meta["copy"] = cp
675 675 if not manifest2: # not a branch merge
676 676 meta["copyrev"] = hex(manifest1.get(cp, nullid))
677 677 fp2 = nullid
678 678 elif fp2 != nullid: # copied on remote side
679 679 meta["copyrev"] = hex(manifest1.get(cp, nullid))
680 680 elif fp1 != nullid: # copied on local side, reversed
681 681 meta["copyrev"] = hex(manifest2.get(cp))
682 682 fp2 = fp1
683 683 else: # directory rename
684 684 meta["copyrev"] = hex(manifest1.get(cp, nullid))
685 685 self.ui.debug(_(" %s: copy %s:%s\n") %
686 686 (fn, cp, meta["copyrev"]))
687 687 fp1 = nullid
688 688 elif fp2 != nullid:
689 689 # is one parent an ancestor of the other?
690 690 fpa = fl.ancestor(fp1, fp2)
691 691 if fpa == fp1:
692 692 fp1, fp2 = fp2, nullid
693 693 elif fpa == fp2:
694 694 fp2 = nullid
695 695
696 696 # is the file unmodified from the parent? report existing entry
697 697 if fp2 == nullid and not fl.cmp(fp1, t):
698 698 return fp1
699 699
700 700 changelist.append(fn)
701 701 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
702 702
703 703 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
704 704 if p1 is None:
705 705 p1, p2 = self.dirstate.parents()
706 706 return self.commit(files=files, text=text, user=user, date=date,
707 707 p1=p1, p2=p2, wlock=wlock, extra=extra)
708 708
709 709 def commit(self, files=None, text="", user=None, date=None,
710 710 match=util.always, force=False, lock=None, wlock=None,
711 711 force_editor=False, p1=None, p2=None, extra={}):
712 712
713 713 commit = []
714 714 remove = []
715 715 changed = []
716 716 use_dirstate = (p1 is None) # not rawcommit
717 717 extra = extra.copy()
718 718
719 719 if use_dirstate:
720 720 if files:
721 721 for f in files:
722 722 s = self.dirstate.state(f)
723 723 if s in 'nmai':
724 724 commit.append(f)
725 725 elif s == 'r':
726 726 remove.append(f)
727 727 else:
728 728 self.ui.warn(_("%s not tracked!\n") % f)
729 729 else:
730 730 changes = self.status(match=match)[:5]
731 731 modified, added, removed, deleted, unknown = changes
732 732 commit = modified + added
733 733 remove = removed
734 734 else:
735 735 commit = files
736 736
737 737 if use_dirstate:
738 738 p1, p2 = self.dirstate.parents()
739 739 update_dirstate = True
740 740 else:
741 741 p1, p2 = p1, p2 or nullid
742 742 update_dirstate = (self.dirstate.parents()[0] == p1)
743 743
744 744 c1 = self.changelog.read(p1)
745 745 c2 = self.changelog.read(p2)
746 746 m1 = self.manifest.read(c1[0]).copy()
747 747 m2 = self.manifest.read(c2[0])
748 748
749 749 if use_dirstate:
750 750 branchname = self.workingctx().branch()
751 751 try:
752 752 branchname = branchname.decode('UTF-8').encode('UTF-8')
753 753 except UnicodeDecodeError:
754 754 raise util.Abort(_('branch name not in UTF-8!'))
755 755 else:
756 756 branchname = ""
757 757
758 758 if use_dirstate:
759 759 oldname = c1[5].get("branch") # stored in UTF-8
760 760 if not commit and not remove and not force and p2 == nullid and \
761 761 branchname == oldname:
762 762 self.ui.status(_("nothing changed\n"))
763 763 return None
764 764
765 765 xp1 = hex(p1)
766 766 if p2 == nullid: xp2 = ''
767 767 else: xp2 = hex(p2)
768 768
769 769 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
770 770
771 771 if not wlock:
772 772 wlock = self.wlock()
773 773 if not lock:
774 774 lock = self.lock()
775 775 tr = self.transaction()
776 776
777 777 # check in files
778 778 new = {}
779 779 linkrev = self.changelog.count()
780 780 commit.sort()
781 781 is_exec = util.execfunc(self.root, m1.execf)
782 782 is_link = util.linkfunc(self.root, m1.linkf)
783 783 for f in commit:
784 784 self.ui.note(f + "\n")
785 785 try:
786 786 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
787 787 new_exec = is_exec(f)
788 788 new_link = is_link(f)
789 789 if not changed or changed[-1] != f:
790 790 # mention the file in the changelog if some flag changed,
791 791 # even if there was no content change.
792 792 old_exec = m1.execf(f)
793 793 old_link = m1.linkf(f)
794 794 if old_exec != new_exec or old_link != new_link:
795 795 changed.append(f)
796 796 m1.set(f, new_exec, new_link)
797 797 except (OSError, IOError):
798 798 if use_dirstate:
799 799 self.ui.warn(_("trouble committing %s!\n") % f)
800 800 raise
801 801 else:
802 802 remove.append(f)
803 803
804 804 # update manifest
805 805 m1.update(new)
806 806 remove.sort()
807 807 removed = []
808 808
809 809 for f in remove:
810 810 if f in m1:
811 811 del m1[f]
812 812 removed.append(f)
813 elif f in m2:
814 removed.append(f)
813 815 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
814 816
815 817 # add changeset
816 818 new = new.keys()
817 819 new.sort()
818 820
819 821 user = user or self.ui.username()
820 822 if not text or force_editor:
821 823 edittext = []
822 824 if text:
823 825 edittext.append(text)
824 826 edittext.append("")
825 827 edittext.append("HG: user: %s" % user)
826 828 if p2 != nullid:
827 829 edittext.append("HG: branch merge")
828 830 if branchname:
829 831 edittext.append("HG: branch %s" % util.tolocal(branchname))
830 832 edittext.extend(["HG: changed %s" % f for f in changed])
831 833 edittext.extend(["HG: removed %s" % f for f in removed])
832 834 if not changed and not remove:
833 835 edittext.append("HG: no files changed")
834 836 edittext.append("")
835 837 # run editor in the repository root
836 838 olddir = os.getcwd()
837 839 os.chdir(self.root)
838 840 text = self.ui.edit("\n".join(edittext), user)
839 841 os.chdir(olddir)
840 842
841 843 lines = [line.rstrip() for line in text.rstrip().splitlines()]
842 844 while lines and not lines[0]:
843 845 del lines[0]
844 846 if not lines:
845 847 return None
846 848 text = '\n'.join(lines)
847 849 if branchname:
848 850 extra["branch"] = branchname
849 851 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
850 852 user, date, extra)
851 853 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
852 854 parent2=xp2)
853 855 tr.close()
854 856
855 857 if self.branchcache and "branch" in extra:
856 858 self.branchcache[util.tolocal(extra["branch"])] = n
857 859
858 860 if use_dirstate or update_dirstate:
859 861 self.dirstate.setparents(n)
860 862 if use_dirstate:
861 863 self.dirstate.update(new, "n")
862 864 self.dirstate.forget(removed)
863 865
864 866 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
865 867 return n
866 868
867 869 def walk(self, node=None, files=[], match=util.always, badmatch=None):
868 870 '''
869 871 walk recursively through the directory tree or a given
870 872 changeset, finding all files matched by the match
871 873 function
872 874
873 875 results are yielded in a tuple (src, filename), where src
874 876 is one of:
875 877 'f' the file was found in the directory tree
876 878 'm' the file was only in the dirstate and not in the tree
877 879 'b' file was not found and matched badmatch
878 880 '''
879 881
880 882 if node:
881 883 fdict = dict.fromkeys(files)
882 884 # for dirstate.walk, files=['.'] means "walk the whole tree".
883 885 # follow that here, too
884 886 fdict.pop('.', None)
885 887 mdict = self.manifest.read(self.changelog.read(node)[0])
886 888 mfiles = mdict.keys()
887 889 mfiles.sort()
888 890 for fn in mfiles:
889 891 for ffn in fdict:
890 892 # match if the file is the exact name or a directory
891 893 if ffn == fn or fn.startswith("%s/" % ffn):
892 894 del fdict[ffn]
893 895 break
894 896 if match(fn):
895 897 yield 'm', fn
896 898 ffiles = fdict.keys()
897 899 ffiles.sort()
898 900 for fn in ffiles:
899 901 if badmatch and badmatch(fn):
900 902 if match(fn):
901 903 yield 'b', fn
902 904 else:
903 905 self.ui.warn(_('%s: No such file in rev %s\n')
904 906 % (self.pathto(fn), short(node)))
905 907 else:
906 908 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
907 909 yield src, fn
908 910
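
Consuming walk() as the docstring describes is a matter of switching on the src tag; a small sketch:

    missing = []
    for src, fn in repo.walk():
        if src == 'm':    # tracked in the dirstate but absent from the tree
            missing.append(fn)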
909 911 def status(self, node1=None, node2=None, files=[], match=util.always,
910 912 wlock=None, list_ignored=False, list_clean=False):
911 913 """return status of files between two nodes or node and working directory
912 914
913 915 If node1 is None, use the first dirstate parent instead.
914 916 If node2 is None, compare node1 with working directory.
915 917 """
916 918
917 919 def fcmp(fn, getnode):
918 920 t1 = self.wread(fn)
919 921 return self.file(fn).cmp(getnode(fn), t1)
920 922
921 923 def mfmatches(node):
922 924 change = self.changelog.read(node)
923 925 mf = self.manifest.read(change[0]).copy()
924 926 for fn in mf.keys():
925 927 if not match(fn):
926 928 del mf[fn]
927 929 return mf
928 930
929 931 modified, added, removed, deleted, unknown = [], [], [], [], []
930 932 ignored, clean = [], []
931 933
932 934 compareworking = False
933 935 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
934 936 compareworking = True
935 937
936 938 if not compareworking:
937 939 # read the manifest from node1 before the manifest from node2,
938 940 # so that we'll hit the manifest cache if we're going through
939 941 # all the revisions in parent->child order.
940 942 mf1 = mfmatches(node1)
941 943
942 944 mywlock = False
943 945
944 946 # are we comparing the working directory?
945 947 if not node2:
946 948 (lookup, modified, added, removed, deleted, unknown,
947 949 ignored, clean) = self.dirstate.status(files, match,
948 950 list_ignored, list_clean)
949 951
950 952 # are we comparing working dir against its parent?
951 953 if compareworking:
952 954 if lookup:
953 955 # do a full compare of any files that might have changed
954 956 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
955 957 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
956 958 nullid)
957 959 for f in lookup:
958 960 if fcmp(f, getnode):
959 961 modified.append(f)
960 962 else:
961 963 clean.append(f)
962 964 if not wlock and not mywlock:
963 965 mywlock = True
964 966 try:
965 967 wlock = self.wlock(wait=0)
966 968 except lock.LockException:
967 969 pass
968 970 if wlock:
969 971 self.dirstate.update([f], "n")
970 972 else:
971 973 # we are comparing working dir against non-parent
972 974 # generate a pseudo-manifest for the working dir
973 975 # XXX: create it in dirstate.py ?
974 976 mf2 = mfmatches(self.dirstate.parents()[0])
975 977 is_exec = util.execfunc(self.root, mf2.execf)
976 978 is_link = util.linkfunc(self.root, mf2.linkf)
977 979 for f in lookup + modified + added:
978 980 mf2[f] = ""
979 981 mf2.set(f, is_exec(f), is_link(f))
980 982 for f in removed:
981 983 if f in mf2:
982 984 del mf2[f]
983 985
984 986 if mywlock and wlock:
985 987 wlock.release()
986 988 else:
987 989 # we are comparing two revisions
988 990 mf2 = mfmatches(node2)
989 991
990 992 if not compareworking:
991 993 # flush lists from dirstate before comparing manifests
992 994 modified, added, clean = [], [], []
993 995
994 996 # make sure to sort the files so we talk to the disk in a
995 997 # reasonable order
996 998 mf2keys = mf2.keys()
997 999 mf2keys.sort()
998 1000 getnode = lambda fn: mf1.get(fn, nullid)
999 1001 for fn in mf2keys:
1000 1002 if mf1.has_key(fn):
1001 1003 if mf1.flags(fn) != mf2.flags(fn) or \
1002 1004 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
1003 1005 fcmp(fn, getnode))):
1004 1006 modified.append(fn)
1005 1007 elif list_clean:
1006 1008 clean.append(fn)
1007 1009 del mf1[fn]
1008 1010 else:
1009 1011 added.append(fn)
1010 1012
1011 1013 removed = mf1.keys()
1012 1014
1013 1015 # sort and return results:
1014 1016 for l in modified, added, removed, deleted, unknown, ignored, clean:
1015 1017 l.sort()
1016 1018 return (modified, added, removed, deleted, unknown, ignored, clean)
1017 1019
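
Since the seven lists always come back in the same order, callers typically unpack the result directly; note that list_ignored and list_clean must be set for those two lists to be populated:

    (modified, added, removed, deleted,
     unknown, ignored, clean) = repo.status(list_ignored=True,
                                            list_clean=True)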
1018 1020 def add(self, list, wlock=None):
1019 1021 if not wlock:
1020 1022 wlock = self.wlock()
1021 1023 for f in list:
1022 1024 p = self.wjoin(f)
1023 1025 islink = os.path.islink(p)
1024 1026 size = os.lstat(p).st_size
1025 1027 if size > 10000000:
1026 1028 self.ui.warn(_("%s: files over 10MB may cause memory and"
1027 1029 " performance problems\n"
1028 1030 "(use 'hg revert %s' to unadd the file)\n")
1029 1031 % (f, f))
1030 1032 if not islink and not os.path.exists(p):
1031 1033 self.ui.warn(_("%s does not exist!\n") % f)
1032 1034 elif not islink and not os.path.isfile(p):
1033 1035 self.ui.warn(_("%s not added: only files and symlinks "
1034 1036 "supported currently\n") % f)
1035 1037 elif self.dirstate.state(f) in 'an':
1036 1038 self.ui.warn(_("%s already tracked!\n") % f)
1037 1039 else:
1038 1040 self.dirstate.update([f], "a")
1039 1041
1040 1042 def forget(self, list, wlock=None):
1041 1043 if not wlock:
1042 1044 wlock = self.wlock()
1043 1045 for f in list:
1044 1046 if self.dirstate.state(f) not in 'ai':
1045 1047 self.ui.warn(_("%s not added!\n") % f)
1046 1048 else:
1047 1049 self.dirstate.forget([f])
1048 1050
1049 1051 def remove(self, list, unlink=False, wlock=None):
1050 1052 if unlink:
1051 1053 for f in list:
1052 1054 try:
1053 1055 util.unlink(self.wjoin(f))
1054 1056 except OSError, inst:
1055 1057 if inst.errno != errno.ENOENT:
1056 1058 raise
1057 1059 if not wlock:
1058 1060 wlock = self.wlock()
1059 1061 for f in list:
1060 1062 if unlink and os.path.exists(self.wjoin(f)):
1061 1063 self.ui.warn(_("%s still exists!\n") % f)
1062 1064 elif self.dirstate.state(f) == 'a':
1063 1065 self.dirstate.forget([f])
1064 1066 elif f not in self.dirstate:
1065 1067 self.ui.warn(_("%s not tracked!\n") % f)
1066 1068 else:
1067 1069 self.dirstate.update([f], "r")
1068 1070
1069 1071 def undelete(self, list, wlock=None):
1070 1072 p = self.dirstate.parents()[0]
1071 1073 mn = self.changelog.read(p)[0]
1072 1074 m = self.manifest.read(mn)
1073 1075 if not wlock:
1074 1076 wlock = self.wlock()
1075 1077 for f in list:
1076 1078 if self.dirstate.state(f) not in "r":
1077 1079 self.ui.warn("%s not removed!\n" % f)
1078 1080 else:
1079 1081 t = self.file(f).read(m[f])
1080 1082 self.wwrite(f, t, m.flags(f))
1081 1083 self.dirstate.update([f], "n")
1082 1084
1083 1085 def copy(self, source, dest, wlock=None):
1084 1086 p = self.wjoin(dest)
1085 1087 if not (os.path.exists(p) or os.path.islink(p)):
1086 1088 self.ui.warn(_("%s does not exist!\n") % dest)
1087 1089 elif not (os.path.isfile(p) or os.path.islink(p)):
1088 1090 self.ui.warn(_("copy failed: %s is not a file or a "
1089 1091 "symbolic link\n") % dest)
1090 1092 else:
1091 1093 if not wlock:
1092 1094 wlock = self.wlock()
1093 1095 if self.dirstate.state(dest) == '?':
1094 1096 self.dirstate.update([dest], "a")
1095 1097 self.dirstate.copy(source, dest)
1096 1098
1097 1099 def heads(self, start=None):
1098 1100 heads = self.changelog.heads(start)
1099 1101 # sort the output in rev descending order
1100 1102 heads = [(-self.changelog.rev(h), h) for h in heads]
1101 1103 heads.sort()
1102 1104 return [n for (r, n) in heads]
1103 1105
1104 1106 def branches(self, nodes):
1105 1107 if not nodes:
1106 1108 nodes = [self.changelog.tip()]
1107 1109 b = []
1108 1110 for n in nodes:
1109 1111 t = n
1110 1112 while 1:
1111 1113 p = self.changelog.parents(n)
1112 1114 if p[1] != nullid or p[0] == nullid:
1113 1115 b.append((t, n, p[0], p[1]))
1114 1116 break
1115 1117 n = p[0]
1116 1118 return b
1117 1119
1118 1120 def between(self, pairs):
1119 1121 r = []
1120 1122
1121 1123 for top, bottom in pairs:
1122 1124 n, l, i = top, [], 0
1123 1125 f = 1
1124 1126
1125 1127 while n != bottom:
1126 1128 p = self.changelog.parents(n)[0]
1127 1129 if i == f:
1128 1130 l.append(n)
1129 1131 f = f * 2
1130 1132 n = p
1131 1133 i += 1
1132 1134
1133 1135 r.append(l)
1134 1136
1135 1137 return r
1136 1138
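
between() samples each top-to-bottom segment at exponentially growing offsets (the i == f, f = f * 2 dance above), which is what lets findincoming() below narrow long linear branches with few round trips. The recorded offsets, sketched on their own:

    def probe_offsets(length):
        # offsets 1, 2, 4, 8, ... strictly inside a segment of that length
        i, f, offsets = 0, 1, []
        while i < length:
            if i == f:
                offsets.append(i)
                f *= 2
            i += 1
        return offsets

    # probe_offsets(20) -> [1, 2, 4, 8, 16]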
1137 1139 def findincoming(self, remote, base=None, heads=None, force=False):
1138 1140 """Return list of roots of the subsets of missing nodes from remote
1139 1141
1140 1142 If base dict is specified, assume that these nodes and their parents
1141 1143 exist on the remote side and that no child of a node of base exists
1142 1144 in both remote and self.
1143 1145 Furthermore, base will be updated to include the nodes that exist
1144 1146 in both self and remote but none of whose children exist in both.
1145 1147 If a list of heads is specified, return only nodes which are heads
1146 1148 or ancestors of these heads.
1147 1149
1148 1150 All the ancestors of base are in self and in remote.
1149 1151 All the descendants of the list returned are missing in self.
1150 1152 (and so we know that the rest of the nodes are missing in remote, see
1151 1153 outgoing)
1152 1154 """
1153 1155 m = self.changelog.nodemap
1154 1156 search = []
1155 1157 fetch = {}
1156 1158 seen = {}
1157 1159 seenbranch = {}
1158 1160 if base == None:
1159 1161 base = {}
1160 1162
1161 1163 if not heads:
1162 1164 heads = remote.heads()
1163 1165
1164 1166 if self.changelog.tip() == nullid:
1165 1167 base[nullid] = 1
1166 1168 if heads != [nullid]:
1167 1169 return [nullid]
1168 1170 return []
1169 1171
1170 1172 # assume we're closer to the tip than the root
1171 1173 # and start by examining the heads
1172 1174 self.ui.status(_("searching for changes\n"))
1173 1175
1174 1176 unknown = []
1175 1177 for h in heads:
1176 1178 if h not in m:
1177 1179 unknown.append(h)
1178 1180 else:
1179 1181 base[h] = 1
1180 1182
1181 1183 if not unknown:
1182 1184 return []
1183 1185
1184 1186 req = dict.fromkeys(unknown)
1185 1187 reqcnt = 0
1186 1188
1187 1189 # search through remote branches
1188 1190 # a 'branch' here is a linear segment of history, with four parts:
1189 1191 # head, root, first parent, second parent
1190 1192 # (a branch always has two parents (or none) by definition)
1191 1193 unknown = remote.branches(unknown)
1192 1194 while unknown:
1193 1195 r = []
1194 1196 while unknown:
1195 1197 n = unknown.pop(0)
1196 1198 if n[0] in seen:
1197 1199 continue
1198 1200
1199 1201 self.ui.debug(_("examining %s:%s\n")
1200 1202 % (short(n[0]), short(n[1])))
1201 1203 if n[0] == nullid: # found the end of the branch
1202 1204 pass
1203 1205 elif n in seenbranch:
1204 1206 self.ui.debug(_("branch already found\n"))
1205 1207 continue
1206 1208 elif n[1] and n[1] in m: # do we know the base?
1207 1209 self.ui.debug(_("found incomplete branch %s:%s\n")
1208 1210 % (short(n[0]), short(n[1])))
1209 1211 search.append(n) # schedule branch range for scanning
1210 1212 seenbranch[n] = 1
1211 1213 else:
1212 1214 if n[1] not in seen and n[1] not in fetch:
1213 1215 if n[2] in m and n[3] in m:
1214 1216 self.ui.debug(_("found new changeset %s\n") %
1215 1217 short(n[1]))
1216 1218 fetch[n[1]] = 1 # earliest unknown
1217 1219 for p in n[2:4]:
1218 1220 if p in m:
1219 1221 base[p] = 1 # latest known
1220 1222
1221 1223 for p in n[2:4]:
1222 1224 if p not in req and p not in m:
1223 1225 r.append(p)
1224 1226 req[p] = 1
1225 1227 seen[n[0]] = 1
1226 1228
1227 1229 if r:
1228 1230 reqcnt += 1
1229 1231 self.ui.debug(_("request %d: %s\n") %
1230 1232 (reqcnt, " ".join(map(short, r))))
1231 1233 for p in xrange(0, len(r), 10):
1232 1234 for b in remote.branches(r[p:p+10]):
1233 1235 self.ui.debug(_("received %s:%s\n") %
1234 1236 (short(b[0]), short(b[1])))
1235 1237 unknown.append(b)
1236 1238
1237 1239 # do binary search on the branches we found
1238 1240 while search:
1239 1241 n = search.pop(0)
1240 1242 reqcnt += 1
1241 1243 l = remote.between([(n[0], n[1])])[0]
1242 1244 l.append(n[1])
1243 1245 p = n[0]
1244 1246 f = 1
1245 1247 for i in l:
1246 1248 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1247 1249 if i in m:
1248 1250 if f <= 2:
1249 1251 self.ui.debug(_("found new branch changeset %s\n") %
1250 1252 short(p))
1251 1253 fetch[p] = 1
1252 1254 base[i] = 1
1253 1255 else:
1254 1256 self.ui.debug(_("narrowed branch search to %s:%s\n")
1255 1257 % (short(p), short(i)))
1256 1258 search.append((p, i))
1257 1259 break
1258 1260 p, f = i, f * 2
1259 1261
1260 1262 # sanity check our fetch list
1261 1263 for f in fetch.keys():
1262 1264 if f in m:
1263 1265 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1264 1266
1265 1267 if base.keys() == [nullid]:
1266 1268 if force:
1267 1269 self.ui.warn(_("warning: repository is unrelated\n"))
1268 1270 else:
1269 1271 raise util.Abort(_("repository is unrelated"))
1270 1272
1271 1273 self.ui.debug(_("found new changesets starting at ") +
1272 1274 " ".join([short(f) for f in fetch]) + "\n")
1273 1275
1274 1276 self.ui.debug(_("%d total queries\n") % reqcnt)
1275 1277
1276 1278 return fetch.keys()
1277 1279
1278 1280 def findoutgoing(self, remote, base=None, heads=None, force=False):
1279 1281 """Return list of nodes that are roots of subsets not in remote
1280 1282
1281 1283 If base dict is specified, assume that these nodes and their parents
1282 1284 exist on the remote side.
1283 1285 If a list of heads is specified, return only nodes which are heads
1284 1286 or ancestors of these heads, and return a second element which
1285 1287 contains all remote heads which get new children.
1286 1288 """
1287 1289 if base == None:
1288 1290 base = {}
1289 1291 self.findincoming(remote, base, heads, force=force)
1290 1292
1291 1293 self.ui.debug(_("common changesets up to ")
1292 1294 + " ".join(map(short, base.keys())) + "\n")
1293 1295
1294 1296 remain = dict.fromkeys(self.changelog.nodemap)
1295 1297
1296 1298 # prune everything remote has from the tree
1297 1299 del remain[nullid]
1298 1300 remove = base.keys()
1299 1301 while remove:
1300 1302 n = remove.pop(0)
1301 1303 if n in remain:
1302 1304 del remain[n]
1303 1305 for p in self.changelog.parents(n):
1304 1306 remove.append(p)
1305 1307
1306 1308 # find every node whose parents have been pruned
1307 1309 subset = []
1308 1310 # find every remote head that will get new children
1309 1311 updated_heads = {}
1310 1312 for n in remain:
1311 1313 p1, p2 = self.changelog.parents(n)
1312 1314 if p1 not in remain and p2 not in remain:
1313 1315 subset.append(n)
1314 1316 if heads:
1315 1317 if p1 in heads:
1316 1318 updated_heads[p1] = True
1317 1319 if p2 in heads:
1318 1320 updated_heads[p2] = True
1319 1321
1320 1322 # this is the set of all roots we have to push
1321 1323 if heads:
1322 1324 return subset, updated_heads.keys()
1323 1325 else:
1324 1326 return subset
1325 1327
1326 1328 def pull(self, remote, heads=None, force=False, lock=None):
1327 1329 mylock = False
1328 1330 if not lock:
1329 1331 lock = self.lock()
1330 1332 mylock = True
1331 1333
1332 1334 try:
1333 1335 fetch = self.findincoming(remote, force=force)
1334 1336 if fetch == [nullid]:
1335 1337 self.ui.status(_("requesting all changes\n"))
1336 1338
1337 1339 if not fetch:
1338 1340 self.ui.status(_("no changes found\n"))
1339 1341 return 0
1340 1342
1341 1343 if heads is None:
1342 1344 cg = remote.changegroup(fetch, 'pull')
1343 1345 else:
1344 1346 if 'changegroupsubset' not in remote.capabilities:
1345 1347 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1346 1348 cg = remote.changegroupsubset(fetch, heads, 'pull')
1347 1349 return self.addchangegroup(cg, 'pull', remote.url())
1348 1350 finally:
1349 1351 if mylock:
1350 1352 lock.release()
1351 1353
1352 1354 def push(self, remote, force=False, revs=None):
1353 1355 # there are two ways to push to remote repo:
1354 1356 #
1355 1357 # addchangegroup assumes local user can lock remote
1356 1358 # repo (local filesystem, old ssh servers).
1357 1359 #
1358 1360 # unbundle assumes local user cannot lock remote repo (new ssh
1359 1361 # servers, http servers).
1360 1362
1361 1363 if remote.capable('unbundle'):
1362 1364 return self.push_unbundle(remote, force, revs)
1363 1365 return self.push_addchangegroup(remote, force, revs)
1364 1366
1365 1367 def prepush(self, remote, force, revs):
1366 1368 base = {}
1367 1369 remote_heads = remote.heads()
1368 1370 inc = self.findincoming(remote, base, remote_heads, force=force)
1369 1371
1370 1372 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1371 1373 if revs is not None:
1372 1374 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1373 1375 else:
1374 1376 bases, heads = update, self.changelog.heads()
1375 1377
1376 1378 if not bases:
1377 1379 self.ui.status(_("no changes found\n"))
1378 1380 return None, 1
1379 1381 elif not force:
1380 1382 # check if we're creating new remote heads
1381 1383 # to be a remote head after push, node must be either
1382 1384 # - unknown locally
1383 1385 # - a local outgoing head descended from update
1384 1386 # - a remote head that's known locally and not
1385 1387 # ancestral to an outgoing head
1386 1388
1387 1389 warn = 0
1388 1390
1389 1391 if remote_heads == [nullid]:
1390 1392 warn = 0
1391 1393 elif not revs and len(heads) > len(remote_heads):
1392 1394 warn = 1
1393 1395 else:
1394 1396 newheads = list(heads)
1395 1397 for r in remote_heads:
1396 1398 if r in self.changelog.nodemap:
1397 1399 desc = self.changelog.heads(r, heads)
1398 1400 l = [h for h in heads if h in desc]
1399 1401 if not l:
1400 1402 newheads.append(r)
1401 1403 else:
1402 1404 newheads.append(r)
1403 1405 if len(newheads) > len(remote_heads):
1404 1406 warn = 1
1405 1407
1406 1408 if warn:
1407 1409 self.ui.warn(_("abort: push creates new remote heads!\n"))
1408 1410 self.ui.status(_("(did you forget to merge?"
1409 1411 " use push -f to force)\n"))
1410 1412 return None, 1
1411 1413 elif inc:
1412 1414 self.ui.warn(_("note: unsynced remote changes!\n"))
1413 1415
1414 1416
1415 1417 if revs is None:
1416 1418 cg = self.changegroup(update, 'push')
1417 1419 else:
1418 1420 cg = self.changegroupsubset(update, revs, 'push')
1419 1421 return cg, remote_heads
1420 1422
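# A toy restatement (not Mercurial API) of the head check in
# prepush() above: a remote head survives the push only if no
# outgoing local head descends from it, and the push is flagged when
# the resulting head count grows. descends_from is a hypothetical
# helper returning the local heads descended from a remote head.
def creates_new_heads(local_heads, remote_heads, descends_from):
    newheads = list(local_heads)
    for r in remote_heads:
        if not descends_from(r):
            newheads.append(r)        # r would remain a head
    return len(newheads) > len(remote_heads)

# one local head extends the single remote head: no new head
print(creates_new_heads(['h2'], ['h1'], lambda r: ['h2']))       # False
# an extra unrelated local head would leave the remote with two
print(creates_new_heads(['h2', 'x'], ['h1'], lambda r: ['h2']))  # True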
1421 1423 def push_addchangegroup(self, remote, force, revs):
1422 1424 lock = remote.lock()
1423 1425
1424 1426 ret = self.prepush(remote, force, revs)
1425 1427 if ret[0] is not None:
1426 1428 cg, remote_heads = ret
1427 1429 return remote.addchangegroup(cg, 'push', self.url())
1428 1430 return ret[1]
1429 1431
1430 1432 def push_unbundle(self, remote, force, revs):
1431 1433 # local repo finds heads on server, finds out what revs it
1432 1434 # must push. once revs transferred, if server finds it has
1433 1435 # different heads (someone else won commit/push race), server
1434 1436 # aborts.
1435 1437
1436 1438 ret = self.prepush(remote, force, revs)
1437 1439 if ret[0] is not None:
1438 1440 cg, remote_heads = ret
1439 1441 if force: remote_heads = ['force']
1440 1442 return remote.unbundle(cg, remote_heads, 'push')
1441 1443 return ret[1]
1442 1444
1443 1445 def changegroupinfo(self, nodes):
1444 1446 self.ui.note(_("%d changesets found\n") % len(nodes))
1445 1447 if self.ui.debugflag:
1446 1448 self.ui.debug(_("List of changesets:\n"))
1447 1449 for node in nodes:
1448 1450 self.ui.debug("%s\n" % hex(node))
1449 1451
1450 1452 def changegroupsubset(self, bases, heads, source):
1451 1453 """This function generates a changegroup consisting of all the nodes
1452 1454 that are descendants of any of the bases, and ancestors of any of
1453 1455 the heads.
1454 1456
1455 1457 It is fairly complex as determining which filenodes and which
1456 1458 manifest nodes need to be included for the changeset to be complete
1457 1459 is non-trivial.
1458 1460
1459 1461 Another wrinkle is doing the reverse, figuring out which changeset in
1460 1462 the changegroup a particular filenode or manifestnode belongs to."""
1461 1463
1462 1464 self.hook('preoutgoing', throw=True, source=source)
1463 1465
1464 1466 # Set up some initial variables
1465 1467 # Make it easy to refer to self.changelog
1466 1468 cl = self.changelog
1467 1469 # msng is short for missing - compute the list of changesets in this
1468 1470 # changegroup.
1469 1471 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1470 1472 self.changegroupinfo(msng_cl_lst)
1471 1473 # Some bases may turn out to be superfluous, and some heads may be
1472 1474 # too. nodesbetween will return the minimal set of bases and heads
1473 1475 # necessary to re-create the changegroup.
1474 1476
1475 1477 # Known heads are the list of heads that it is assumed the recipient
1476 1478 # of this changegroup will know about.
1477 1479 knownheads = {}
1478 1480 # We assume that all parents of bases are known heads.
1479 1481 for n in bases:
1480 1482 for p in cl.parents(n):
1481 1483 if p != nullid:
1482 1484 knownheads[p] = 1
1483 1485 knownheads = knownheads.keys()
1484 1486 if knownheads:
1485 1487 # Now that we know what heads are known, we can compute which
1486 1488 # changesets are known. The recipient must know about all
1487 1489 # changesets required to reach the known heads from the null
1488 1490 # changeset.
1489 1491 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1490 1492 junk = None
1491 1493 # Transform the list into an ersatz set.
1492 1494 has_cl_set = dict.fromkeys(has_cl_set)
1493 1495 else:
1494 1496 # If there were no known heads, the recipient cannot be assumed to
1495 1497 # know about any changesets.
1496 1498 has_cl_set = {}
1497 1499
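# A toy model (plain dicts instead of the changelog) of the two
# steps above: the parents of the bases are the assumed-known heads,
# and everything reachable from those heads forms the recipient's
# set, standing in for cl.nodesbetween(None, knownheads).
def _ancestors(parents, heads):
    seen = {}
    stack = list(heads)
    while stack:
        n = stack.pop()
        if n not in seen:
            seen[n] = 1
            stack.extend(parents.get(n, []))
    return seen

parents = {'b': ['a'], 'c': ['b'], 'd': ['c']}
bases = ['c']
knownheads = {}
for n in bases:
    for p in parents.get(n, []):
        knownheads[p] = 1
has_cl_set = _ancestors(parents, list(knownheads))
print(sorted(has_cl_set))   # ['a', 'b'] -- everything below the bases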
1498 1500 # Make it easy to refer to self.manifest
1499 1501 mnfst = self.manifest
1500 1502 # We don't know which manifests are missing yet
1501 1503 msng_mnfst_set = {}
1502 1504 # Nor do we know which filenodes are missing.
1503 1505 msng_filenode_set = {}
1504 1506
1505 1507 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1506 1508 junk = None
1507 1509
1508 1510 # A changeset always belongs to itself, so the changenode lookup
1509 1511 # function for a changenode is identity.
1510 1512 def identity(x):
1511 1513 return x
1512 1514
1513 1515 # A function generating function. Sets up an environment for the
1514 1516 # inner function.
1515 1517 def cmp_by_rev_func(revlog):
1516 1518 # Compare two nodes by their revision number in the environment's
1517 1519 # revision number. Since the revision number gives both the most
1518 1520 # efficient order to read the nodes in and a
1519 1521 # topological sorting of the nodes, this function is often useful.
1520 1522 def cmp_by_rev(a, b):
1521 1523 return cmp(revlog.rev(a), revlog.rev(b))
1522 1524 return cmp_by_rev
1523 1525
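# A self-contained illustration of the comparator factory above,
# with a toy revlog exposing only the rev() method it needs. This
# relies on Python 2 cmp() and comparator-based list.sort(), as the
# surrounding code does.
class _toyrevlog(object):
    def __init__(self, order):
        self._rev = dict([(n, i) for i, n in enumerate(order)])
    def rev(self, node):
        return self._rev[node]

def _cmp_by_rev_func(revlog):
    def cmp_by_rev(a, b):
        return cmp(revlog.rev(a), revlog.rev(b))
    return cmp_by_rev

rl = _toyrevlog(['n0', 'n1', 'n2'])
nodes = ['n2', 'n0', 'n1']
nodes.sort(_cmp_by_rev_func(rl))
print(nodes)   # ['n0', 'n1', 'n2'] -- storage order of the revlog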
1524 1526 # If we determine that a particular file or manifest node must be a
1525 1527 # node that the recipient of the changegroup will already have, we can
1526 1528 # also assume the recipient will have all the parents. This function
1527 1529 # prunes them from the set of missing nodes.
1528 1530 def prune_parents(revlog, hasset, msngset):
1529 1531 haslst = hasset.keys()
1530 1532 haslst.sort(cmp_by_rev_func(revlog))
1531 1533 for node in haslst:
1532 1534 parentlst = [p for p in revlog.parents(node) if p != nullid]
1533 1535 while parentlst:
1534 1536 n = parentlst.pop()
1535 1537 if n not in hasset:
1536 1538 hasset[n] = 1
1537 1539 p = [p for p in revlog.parents(n) if p != nullid]
1538 1540 parentlst.extend(p)
1539 1541 for n in hasset:
1540 1542 msngset.pop(n, None)
1541 1543
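# A toy model of prune_parents above: a node the recipient has
# implies all of its ancestors, so mark those as present too, then
# drop the whole has-set from the missing set. A plain parents dict
# replaces the revlog; the rev-ordered sort above is an efficiency
# detail omitted here.
def _prune_parents_toy(parents, hasset, msngset):
    for node in list(hasset):
        stack = list(parents.get(node, []))
        while stack:
            n = stack.pop()
            if n not in hasset:
                hasset[n] = 1
                stack.extend(parents.get(n, []))
    for n in hasset:
        msngset.pop(n, None)

parents = {'c': ['b'], 'b': ['a']}
missing = {'a': 1, 'b': 1, 'c': 1}
_prune_parents_toy(parents, {'c': 1}, missing)
print(sorted(missing))   # [] -- 'c' known, so 'a' and 'b' are implied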
1542 1544 # This is a function generating function used to set up an environment
1543 1545 # for the inner function to execute in.
1544 1546 def manifest_and_file_collector(changedfileset):
1545 1547 # This is an information gathering function that gathers
1546 1548 # information from each changeset node that goes out as part of
1547 1549 # the changegroup. The information gathered is a list of which
1548 1550 # manifest nodes are potentially required (the recipient may
1549 1551 # already have them) and total list of all files which were
1550 1552 # changed in any changeset in the changegroup.
1551 1553 #
1552 1554 # We also remember the first changenode we saw any manifest
1553 1555 # referenced by so we can later determine which changenode 'owns'
1554 1556 # the manifest.
1555 1557 def collect_manifests_and_files(clnode):
1556 1558 c = cl.read(clnode)
1557 1559 for f in c[3]:
1558 1560 # This is to make sure we only have one instance of each
1559 1561 # filename string for each filename.
1560 1562 changedfileset.setdefault(f, f)
1561 1563 msng_mnfst_set.setdefault(c[0], clnode)
1562 1564 return collect_manifests_and_files
1563 1565
1564 1566 # Figure out which manifest nodes (of the ones we think might be part
1565 1567 # of the changegroup) the recipient must know about and remove them
1566 1568 # from the changegroup.
1567 1569 def prune_manifests():
1568 1570 has_mnfst_set = {}
1569 1571 for n in msng_mnfst_set:
1570 1572 # If a 'missing' manifest thinks it belongs to a changenode
1571 1573 # the recipient is assumed to have, obviously the recipient
1572 1574 # must have that manifest.
1573 1575 linknode = cl.node(mnfst.linkrev(n))
1574 1576 if linknode in has_cl_set:
1575 1577 has_mnfst_set[n] = 1
1576 1578 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1577 1579
1578 1580 # Use the information collected in collect_manifests_and_files to say
1579 1581 # which changenode any manifestnode belongs to.
1580 1582 def lookup_manifest_link(mnfstnode):
1581 1583 return msng_mnfst_set[mnfstnode]
1582 1584
1583 1585 # A function generating function that sets up the initial environment
1584 1586 # for the inner function.
1585 1587 def filenode_collector(changedfiles):
1586 1588 next_rev = [0]
1587 1589 # This gathers information from each manifestnode included in the
1588 1590 # changegroup about which filenodes the manifest node references
1589 1591 # so we can include those in the changegroup too.
1590 1592 #
1591 1593 # It also remembers which changenode each filenode belongs to. It
1592 1594 # does this by assuming that a filenode belongs to the changenode
1593 1595 # that the first manifest referencing it belongs to.
1594 1596 def collect_msng_filenodes(mnfstnode):
1595 1597 r = mnfst.rev(mnfstnode)
1596 1598 if r == next_rev[0]:
1597 1599 # If the last rev we looked at was the one just previous,
1598 1600 # we only need to see a diff.
1599 1601 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1600 1602 # For each line in the delta
1601 1603 for dline in delta.splitlines():
1602 1604 # get the filename and filenode for that line
1603 1605 f, fnode = dline.split('\0')
1604 1606 fnode = bin(fnode[:40])
1605 1607 f = changedfiles.get(f, None)
1606 1608 # And if the file is in the list of files we care
1607 1609 # about.
1608 1610 if f is not None:
1609 1611 # Get the changenode this manifest belongs to
1610 1612 clnode = msng_mnfst_set[mnfstnode]
1611 1613 # Create the set of filenodes for the file if
1612 1614 # there isn't one already.
1613 1615 ndset = msng_filenode_set.setdefault(f, {})
1614 1616 # And set the filenode's changelog node to the
1615 1617 # manifest's if it hasn't been set already.
1616 1618 ndset.setdefault(fnode, clnode)
1617 1619 else:
1618 1620 # Otherwise we need a full manifest.
1619 1621 m = mnfst.read(mnfstnode)
1620 1622 # For every file we care about.
1621 1623 for f in changedfiles:
1622 1624 fnode = m.get(f, None)
1623 1625 # If it's in the manifest
1624 1626 if fnode is not None:
1625 1627 # See comments above.
1626 1628 clnode = msng_mnfst_set[mnfstnode]
1627 1629 ndset = msng_filenode_set.setdefault(f, {})
1628 1630 ndset.setdefault(fnode, clnode)
1629 1631 # Remember the revision we hope to see next.
1630 1632 next_rev[0] = r + 1
1631 1633 return collect_msng_filenodes
1632 1634
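# The record format the delta parsing above relies on: each manifest
# line is "<filename>\0<40 hex digits>", where the hex filenode may
# be followed by flag characters -- hence the fnode[:40] slice. A
# toy split over fabricated data, not a real manifest delta:
delta = "foo.c\x00" + "ab" * 20 + "\n" + "bar.h\x00" + "cd" * 20
for dline in delta.splitlines():
    f, fnode = dline.split('\0')
    print((f, fnode[:8]))   # ('foo.c', 'abababab'), ('bar.h', 'cdcdcdcd')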
1633 1635 # We have a list of filenodes we think we need for a file; let's remove
1634 1636 # all those we know the recipient must have.
1635 1637 def prune_filenodes(f, filerevlog):
1636 1638 msngset = msng_filenode_set[f]
1637 1639 hasset = {}
1638 1640 # If a 'missing' filenode thinks it belongs to a changenode we
1639 1641 # assume the recipient must have, then the recipient must have
1640 1642 # that filenode.
1641 1643 for n in msngset:
1642 1644 clnode = cl.node(filerevlog.linkrev(n))
1643 1645 if clnode in has_cl_set:
1644 1646 hasset[n] = 1
1645 1647 prune_parents(filerevlog, hasset, msngset)
1646 1648
1647 1649 # A function generating function that sets up a context for the
1648 1650 # inner function.
1649 1651 def lookup_filenode_link_func(fname):
1650 1652 msngset = msng_filenode_set[fname]
1651 1653 # Look up the changenode the filenode belongs to.
1652 1654 def lookup_filenode_link(fnode):
1653 1655 return msngset[fnode]
1654 1656 return lookup_filenode_link
1655 1657
1656 1658 # Now that we have all these utility functions to help out and
1657 1659 # logically divide up the task, generate the group.
1658 1660 def gengroup():
1659 1661 # The set of changed files starts empty.
1660 1662 changedfiles = {}
1661 1663 # Create a changenode group generator that will call our functions
1662 1664 # back to look up the owning changenode and collect information.
1663 1665 group = cl.group(msng_cl_lst, identity,
1664 1666 manifest_and_file_collector(changedfiles))
1665 1667 for chnk in group:
1666 1668 yield chnk
1667 1669
1668 1670 # The list of manifests has been collected by the generator
1669 1671 # calling our functions back.
1670 1672 prune_manifests()
1671 1673 msng_mnfst_lst = msng_mnfst_set.keys()
1672 1674 # Sort the manifestnodes by revision number.
1673 1675 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1674 1676 # Create a generator for the manifestnodes that calls our lookup
1675 1677 # and data collection functions back.
1676 1678 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1677 1679 filenode_collector(changedfiles))
1678 1680 for chnk in group:
1679 1681 yield chnk
1680 1682
1681 1683 # These are no longer needed, dereference and toss the memory for
1682 1684 # them.
1683 1685 msng_mnfst_lst = None
1684 1686 msng_mnfst_set.clear()
1685 1687
1686 1688 changedfiles = changedfiles.keys()
1687 1689 changedfiles.sort()
1688 1690 # Go through all our files in order sorted by name.
1689 1691 for fname in changedfiles:
1690 1692 filerevlog = self.file(fname)
1691 1693 # Toss out the filenodes that the recipient isn't really
1692 1694 # missing.
1693 1695 if msng_filenode_set.has_key(fname):
1694 1696 prune_filenodes(fname, filerevlog)
1695 1697 msng_filenode_lst = msng_filenode_set[fname].keys()
1696 1698 else:
1697 1699 msng_filenode_lst = []
1698 1700 # If any filenodes are left, generate the group for them,
1699 1701 # otherwise don't bother.
1700 1702 if len(msng_filenode_lst) > 0:
1701 1703 yield changegroup.genchunk(fname)
1702 1704 # Sort the filenodes by their revision #
1703 1705 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1704 1706 # Create a group generator and only pass in a changenode
1705 1707 # lookup function as we need to collect no information
1706 1708 # from filenodes.
1707 1709 group = filerevlog.group(msng_filenode_lst,
1708 1710 lookup_filenode_link_func(fname))
1709 1711 for chnk in group:
1710 1712 yield chnk
1711 1713 if msng_filenode_set.has_key(fname):
1712 1714 # Don't need this anymore, toss it to free memory.
1713 1715 del msng_filenode_set[fname]
1714 1716 # Signal that no more groups are left.
1715 1717 yield changegroup.closechunk()
1716 1718
1717 1719 if msng_cl_lst:
1718 1720 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1719 1721
1720 1722 return util.chunkbuffer(gengroup())
1721 1723
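# A sketch of the chunk framing behind changegroup.genchunk and
# changegroup.closechunk as used above (assuming the classic
# changegroup format): each chunk is a 4-byte big-endian length that
# counts itself, followed by the payload, and a zero-length header
# closes a group.
import struct

def _genchunk(data):
    return struct.pack(">l", len(data) + 4) + data

def _closechunk():
    return struct.pack(">l", 0)

def _iterchunks(buf):
    pos = 0
    while pos < len(buf):
        l = struct.unpack(">l", buf[pos:pos + 4])[0]
        if l <= 4:                     # end-of-group marker
            yield None
            pos += 4
        else:
            yield buf[pos + 4:pos + l]
            pos += l

stream = _genchunk("hello") + _closechunk()
print(list(_iterchunks(stream)))       # ['hello', None]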
1722 1724 def changegroup(self, basenodes, source):
1723 1725 """Generate a changegroup of all nodes that we have that a recipient
1724 1726 doesn't.
1725 1727
1726 1728 This is much easier than the previous function as we can assume that
1727 1729 the recipient has any changenode we aren't sending them."""
1728 1730
1729 1731 self.hook('preoutgoing', throw=True, source=source)
1730 1732
1731 1733 cl = self.changelog
1732 1734 nodes = cl.nodesbetween(basenodes, None)[0]
1733 1735 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1734 1736 self.changegroupinfo(nodes)
1735 1737
1736 1738 def identity(x):
1737 1739 return x
1738 1740
1739 1741 def gennodelst(revlog):
1740 1742 for r in xrange(0, revlog.count()):
1741 1743 n = revlog.node(r)
1742 1744 if revlog.linkrev(n) in revset:
1743 1745 yield n
1744 1746
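# gennodelst above walks a revlog in revision order and keeps only
# the nodes whose linkrev points into the outgoing changeset set; a
# dict-based toy of the same filter:
def _gennodelst_toy(nodes_in_order, linkrev, revset):
    for n in nodes_in_order:
        if linkrev[n] in revset:
            yield n

print(list(_gennodelst_toy(['a', 'b', 'c'],
                           {'a': 0, 'b': 1, 'c': 2},
                           {0: None, 2: None})))   # ['a', 'c']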
1745 1747 def changed_file_collector(changedfileset):
1746 1748 def collect_changed_files(clnode):
1747 1749 c = cl.read(clnode)
1748 1750 for fname in c[3]:
1749 1751 changedfileset[fname] = 1
1750 1752 return collect_changed_files
1751 1753
1752 1754 def lookuprevlink_func(revlog):
1753 1755 def lookuprevlink(n):
1754 1756 return cl.node(revlog.linkrev(n))
1755 1757 return lookuprevlink
1756 1758
1757 1759 def gengroup():
1758 1760 # construct a list of all changed files
1759 1761 changedfiles = {}
1760 1762
1761 1763 for chnk in cl.group(nodes, identity,
1762 1764 changed_file_collector(changedfiles)):
1763 1765 yield chnk
1764 1766 changedfiles = changedfiles.keys()
1765 1767 changedfiles.sort()
1766 1768
1767 1769 mnfst = self.manifest
1768 1770 nodeiter = gennodelst(mnfst)
1769 1771 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1770 1772 yield chnk
1771 1773
1772 1774 for fname in changedfiles:
1773 1775 filerevlog = self.file(fname)
1774 1776 nodeiter = gennodelst(filerevlog)
1775 1777 nodeiter = list(nodeiter)
1776 1778 if nodeiter:
1777 1779 yield changegroup.genchunk(fname)
1778 1780 lookup = lookuprevlink_func(filerevlog)
1779 1781 for chnk in filerevlog.group(nodeiter, lookup):
1780 1782 yield chnk
1781 1783
1782 1784 yield changegroup.closechunk()
1783 1785
1784 1786 if nodes:
1785 1787 self.hook('outgoing', node=hex(nodes[0]), source=source)
1786 1788
1787 1789 return util.chunkbuffer(gengroup())
1788 1790
1789 1791 def addchangegroup(self, source, srctype, url):
1790 1792 """add changegroup to repo.
1791 1793
1792 1794 return values:
1793 1795 - nothing changed or no source: 0
1794 1796 - more heads than before: 1+added heads (2..n)
1795 1797 - less heads than before: -1-removed heads (-2..-n)
1796 1798 - number of heads stays the same: 1
1797 1799 """
1798 1800 def csmap(x):
1799 1801 self.ui.debug(_("add changeset %s\n") % short(x))
1800 1802 return cl.count()
1801 1803
1802 1804 def revmap(x):
1803 1805 return cl.rev(x)
1804 1806
1805 1807 if not source:
1806 1808 return 0
1807 1809
1808 1810 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1809 1811
1810 1812 changesets = files = revisions = 0
1811 1813
1812 1814 tr = self.transaction()
1813 1815
1814 1816 # write changelog data to temp files so concurrent readers will not see
1815 1817 # an inconsistent view
1816 1818 cl = self.changelog
1817 1819 cl.delayupdate()
1818 1820 oldheads = len(cl.heads())
1819 1821
1820 1822 # pull off the changeset group
1821 1823 self.ui.status(_("adding changesets\n"))
1822 1824 cor = cl.count() - 1
1823 1825 chunkiter = changegroup.chunkiter(source)
1824 1826 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1825 1827 raise util.Abort(_("received changelog group is empty"))
1826 1828 cnr = cl.count() - 1
1827 1829 changesets = cnr - cor
1828 1830
1829 1831 # pull off the manifest group
1830 1832 self.ui.status(_("adding manifests\n"))
1831 1833 chunkiter = changegroup.chunkiter(source)
1832 1834 # no need to check for empty manifest group here:
1833 1835 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1834 1836 # no new manifest will be created and the manifest group will
1835 1837 # be empty during the pull
1836 1838 self.manifest.addgroup(chunkiter, revmap, tr)
1837 1839
1838 1840 # process the files
1839 1841 self.ui.status(_("adding file changes\n"))
1840 1842 while 1:
1841 1843 f = changegroup.getchunk(source)
1842 1844 if not f:
1843 1845 break
1844 1846 self.ui.debug(_("adding %s revisions\n") % f)
1845 1847 fl = self.file(f)
1846 1848 o = fl.count()
1847 1849 chunkiter = changegroup.chunkiter(source)
1848 1850 if fl.addgroup(chunkiter, revmap, tr) is None:
1849 1851 raise util.Abort(_("received file revlog group is empty"))
1850 1852 revisions += fl.count() - o
1851 1853 files += 1
1852 1854
1853 1855 # make changelog see real files again
1854 1856 cl.finalize(tr)
1855 1857
1856 1858 newheads = len(self.changelog.heads())
1857 1859 heads = ""
1858 1860 if oldheads and newheads != oldheads:
1859 1861 heads = _(" (%+d heads)") % (newheads - oldheads)
1860 1862
1861 1863 self.ui.status(_("added %d changesets"
1862 1864 " with %d changes to %d files%s\n")
1863 1865 % (changesets, revisions, files, heads))
1864 1866
1865 1867 if changesets > 0:
1866 1868 self.hook('pretxnchangegroup', throw=True,
1867 1869 node=hex(self.changelog.node(cor+1)), source=srctype,
1868 1870 url=url)
1869 1871
1870 1872 tr.close()
1871 1873
1872 1874 if changesets > 0:
1873 1875 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1874 1876 source=srctype, url=url)
1875 1877
1876 1878 for i in xrange(cor + 1, cnr + 1):
1877 1879 self.hook("incoming", node=hex(self.changelog.node(i)),
1878 1880 source=srctype, url=url)
1879 1881
1880 1882 # never return 0 here:
1881 1883 if newheads < oldheads:
1882 1884 return newheads - oldheads - 1
1883 1885 else:
1884 1886 return newheads - oldheads + 1
1885 1887
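# The return-value convention documented in the docstring above,
# restated as a standalone sketch: once changes were added, the
# result is never 0.
def _headcount_retcode(oldheads, newheads):
    if newheads < oldheads:
        return newheads - oldheads - 1   # heads removed: -2 .. -n
    return newheads - oldheads + 1       # same or more heads: 1 .. n

print(_headcount_retcode(2, 2))   # 1   head count unchanged
print(_headcount_retcode(1, 3))   # 3   two heads added
print(_headcount_retcode(3, 2))   # -2  one head merged away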
1886 1888
1887 1889 def stream_in(self, remote):
1888 1890 fp = remote.stream_out()
1889 1891 l = fp.readline()
1890 1892 try:
1891 1893 resp = int(l)
1892 1894 except ValueError:
1893 1895 raise util.UnexpectedOutput(
1894 1896 _('Unexpected response from remote server:'), l)
1895 1897 if resp == 1:
1896 1898 raise util.Abort(_('operation forbidden by server'))
1897 1899 elif resp == 2:
1898 1900 raise util.Abort(_('locking the remote repository failed'))
1899 1901 elif resp != 0:
1900 1902 raise util.Abort(_('the server sent an unknown error code'))
1901 1903 self.ui.status(_('streaming all changes\n'))
1902 1904 l = fp.readline()
1903 1905 try:
1904 1906 total_files, total_bytes = map(int, l.split(' ', 1))
1905 1907 except (ValueError, TypeError):
1906 1908 raise util.UnexpectedOutput(
1907 1909 _('Unexpected response from remote server:'), l)
1908 1910 self.ui.status(_('%d files to transfer, %s of data\n') %
1909 1911 (total_files, util.bytecount(total_bytes)))
1910 1912 start = time.time()
1911 1913 for i in xrange(total_files):
1912 1914 # XXX doesn't support '\n' or '\r' in filenames
1913 1915 l = fp.readline()
1914 1916 try:
1915 1917 name, size = l.split('\0', 1)
1916 1918 size = int(size)
1917 1919 except (ValueError, TypeError):
1918 1920 raise util.UnexpectedOutput(
1919 1921 _('Unexpected response from remote server:'), l)
1920 1922 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1921 1923 ofp = self.sopener(name, 'w')
1922 1924 for chunk in util.filechunkiter(fp, limit=size):
1923 1925 ofp.write(chunk)
1924 1926 ofp.close()
1925 1927 elapsed = time.time() - start
1926 1928 if elapsed <= 0:
1927 1929 elapsed = 0.001
1928 1930 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1929 1931 (util.bytecount(total_bytes), elapsed,
1930 1932 util.bytecount(total_bytes / elapsed)))
1931 1933 self.reload()
1932 1934 return len(self.heads()) + 1
1933 1935
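# The wire format stream_in() consumes above: a numeric status line,
# a "<files> <bytes>" line, then for each file a "<name>\0<size>"
# line followed by exactly <size> bytes of file data. A toy parse of
# a fabricated stream (Python 2 StringIO):
from StringIO import StringIO

fp = StringIO("0\n1 5\ndata/foo.i\x005\nhello")
assert int(fp.readline()) == 0                    # status: OK
total_files, total_bytes = map(int, fp.readline().split(' ', 1))
for i in xrange(total_files):
    name, size = fp.readline().split('\0', 1)
    print((name, fp.read(int(size))))             # ('data/foo.i', 'hello')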
1934 1936 def clone(self, remote, heads=[], stream=False):
1935 1937 '''clone remote repository.
1936 1938
1937 1939 keyword arguments:
1938 1940 heads: list of revs to clone (forces use of pull)
1939 1941 stream: use streaming clone if possible'''
1940 1942
1941 1943 # now, all clients that can request uncompressed clones can
1942 1944 # read repo formats supported by all servers that can serve
1943 1945 # them.
1944 1946
1945 1947 # if revlog format changes, client will have to check version
1946 1948 # and format flags on "stream" capability, and use
1947 1949 # uncompressed only if compatible.
1948 1950
1949 1951 if stream and not heads and remote.capable('stream'):
1950 1952 return self.stream_in(remote)
1951 1953 return self.pull(remote, heads)
1952 1954
1953 1955 # used to avoid circular references so destructors work
1954 1956 def aftertrans(files):
1955 1957 renamefiles = [tuple(t) for t in files]
1956 1958 def a():
1957 1959 for src, dest in renamefiles:
1958 1960 util.rename(src, dest)
1959 1961 return a
1960 1962
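# aftertrans returns a plain closure over the file list precisely so
# that the transaction holds no reference back into the repository
# (see the comment above). A small usage sketch of the deferred
# rename, run against temporary files:
import os, tempfile

d = tempfile.mkdtemp()
src, dest = os.path.join(d, 'journal'), os.path.join(d, 'undo')
open(src, 'w').close()
cb = aftertrans([(src, dest)])   # queue the rename...
cb()                             # ...and perform it later
print(os.path.exists(dest))      # True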
1961 1963 def instance(ui, path, create):
1962 1964 return localrepository(ui, util.drop_scheme('file', path), create)
1963 1965
1964 1966 def islocal(path):
1965 1967 return True